Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)

Compare commits (198 commits)
The compare view listed the 198 commit SHAs (5662919f72 through ef8a335c93) with no author, date, or message metadata.
.github/workflows/pull_request.yaml (vendored, 25 lines changed):

@@ -7,9 +7,9 @@ on:

 env:
   DOCKER_FILE_PATH: Dockerfile
-  GOLANG_VERSION: 1.15.2
+  GOLANG_VERSION: 1.18.2
   KUBERNETES_VERSION: "1.18.0"
-  KIND_VERSION: "0.7.0"
+  KIND_VERSION: "0.10.0"

 jobs:
   build:
@@ -19,7 +19,9 @@ jobs:
     steps:
       - name: Check out code
        uses: actions/checkout@v2
+        with:
+          ref: ${{github.event.pull_request.head.sha}}

       # Setting up helm binary
       - name: Set up Helm
        uses: azure/setup-helm@v1
@@ -34,16 +36,21 @@ jobs:
        run: |
          make install

-      - name: Lint
-        run: |
-          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
-          golangci-lint run --timeout=10m ./...
+      - name: Run golangci-lint
+        uses: golangci/golangci-lint-action@v2.3.0
+        with:
+          version: v1.45.2
+          only-new-issues: false
+          args: --timeout 10m

       - name: Helm Lint
        run: |
          cd deployments/kubernetes/chart/reloader
          helm lint

+      - name: Link check
+        uses: gaurav-nelson/github-action-markdown-link-check@v1
+
       - name: Install kubectl
        run: |
          curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
@@ -111,7 +118,7 @@ jobs:
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
        with:
-          message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ github.repository }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
+          message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
          allow-repeats: false

       - name: Notify Failure
@@ -131,4 +138,4 @@ jobs:
          fields: repo,author,action,eventName,ref,workflow
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
-          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
+          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
.github/workflows/push.yaml (vendored, 27 lines changed):

@@ -7,9 +7,9 @@ on:

 env:
   DOCKER_FILE_PATH: Dockerfile
-  GOLANG_VERSION: 1.15.2
+  GOLANG_VERSION: 1.18.2
   KUBERNETES_VERSION: "1.18.0"
-  KIND_VERSION: "0.7.0"
+  KIND_VERSION: "0.10.0"
   HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"

 jobs:
@@ -39,11 +39,13 @@ jobs:
        run: |
          make install

-      - name: Lint
-        run: |
-          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
-          golangci-lint run --timeout=10m ./...
-
+      - name: Run golangci-lint
+        uses: golangci/golangci-lint-action@v2.3.0
+        with:
+          version: v1.45.2
+          only-new-issues: false
+          args: --timeout 10m

       - name: Install kubectl
        run: |
          curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
@@ -68,7 +70,7 @@ jobs:

       - name: Generate Tag
        id: generate_tag
-        uses: anothrNick/github-tag-action@1.26.0
+        uses: anothrNick/github-tag-action@1.36.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: true
@@ -114,7 +116,7 @@ jobs:
       # Generate tag for operator without "v"
       - name: Generate Operator Tag
        id: generate_operator_tag
-        uses: anothrNick/github-tag-action@1.26.0
+        uses: anothrNick/github-tag-action@1.36.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: false
@@ -127,6 +129,11 @@ jobs:
          VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
        run: make bump-chart

+      - name: Helm Template
+        run: |
+          helm template reloader deployments/kubernetes/chart/reloader/ > deployments/kubernetes/reloader.yaml
+          helm template reloader deployments/kubernetes/chart/reloader/ --output-dir deployments/kubernetes/manifests/ && mv deployments/kubernetes/manifests/reloader/templates/* deployments/kubernetes/manifests/ && rm -r deployments/kubernetes/manifests/reloader
+
       # Publish helm chart
       - name: Publish Helm chart
        uses: stefanprodan/helm-gh-pages@master
@@ -158,7 +165,7 @@ jobs:
          branch: ${{ github.ref }}

       - name: Push Latest Tag
-        uses: anothrNick/github-tag-action@1.26.0
+        uses: anothrNick/github-tag-action@1.36.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: true
.github/workflows/release.yaml (vendored, 2 lines changed):

@@ -6,7 +6,7 @@ on:
     - "v*"

 env:
-  GOLANG_VERSION: 1.15.2
+  GOLANG_VERSION: 1.18.2

 jobs:
   build:
.gitignore (vendored, 3 lines changed):

@@ -8,4 +8,5 @@ _gopath/
 .DS_Store
 .vscode
 vendor
-dist
+dist
+Reloader
Dockerfile (22 lines changed):

@@ -1,5 +1,13 @@
+ARG BUILDER_IMAGE
+ARG BASE_IMAGE
+
 # Build the manager binary
-FROM golang:1.15.2 as builder
+FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.18.2} as builder
+
+ARG TARGETOS
+ARG TARGETARCH
+ARG GOPROXY
+ARG GOPRIVATE

 WORKDIR /workspace

@@ -16,14 +24,20 @@ COPY internal/ internal/
 COPY pkg/ pkg/

 # Build
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=mod -a -o manager main.go
+RUN CGO_ENABLED=0 \
+    GOOS=${TARGETOS} \
+    GOARCH=${TARGETARCH} \
+    GOPROXY=${GOPROXY} \
+    GOPRIVATE=${GOPRIVATE} \
+    GO111MODULE=on \
+    go build -mod=mod -a -o manager main.go

 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
-FROM gcr.io/distroless/static:nonroot
+FROM ${BASE_IMAGE:-gcr.io/distroless/static:nonroot}
 WORKDIR /
 COPY --from=builder /workspace/manager .
-USER nonroot:nonroot
+USER 65532:65532

 # Port for metrics and probes
 EXPOSE 9090
Makefile (59 lines changed):

@@ -1,48 +1,57 @@
 # note: call scripts from /scripts

-.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy release release-all manifest push clean-image
+.PHONY: default build build-image test stop push apply deploy release release-all manifest push

 OS ?= linux
 ARCH ?= ???
 ALL_ARCH ?= arm64 arm amd64

 BUILDER ?= reloader-builder-${ARCH}
+BUILDER_IMAGE ?=
+BASE_IMAGE ?=
 BINARY ?= Reloader
 DOCKER_IMAGE ?= stakater/reloader
-# Default value "dev"
-TAG ?= v0.0.75.0
-REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${TAG}
-REPOSITORY_ARCH = ${DOCKER_IMAGE}:${TAG}-${ARCH}
+
+# Default value "dev"
+VERSION ?= 0.0.1
+
+REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${VERSION}
+REPOSITORY_ARCH = ${DOCKER_IMAGE}:v${VERSION}-${ARCH}
 BUILD=

 GOCMD = go
 GOFLAGS ?= $(GOFLAGS:)
 LDFLAGS =
+GOPROXY ?=
+GOPRIVATE ?=

 default: build test

 install:
	"$(GOCMD)" mod download

 run:
	go run ./main.go

 build:
	"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"

-builder-image:
-	docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
-
-reloader-${ARCH}.tar:
-	docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
-	docker run --platform ${OS}/${ARCH} --rm "${BUILDER}" > reloader-${ARCH}.tar
-
-binary-image: builder-image
-	cat reloader-${ARCH}.tar | docker buildx build --platform ${OS}/${ARCH} -t "${REPOSITORY_ARCH}" --load -f Dockerfile.run -
+build-image:
+	docker buildx build \
+		--platform ${OS}/${ARCH} \
+		--build-arg GOARCH=$(ARCH) \
+		--build-arg BUILDER_IMAGE=$(BUILDER_IMAGE) \
+		--build-arg BASE_IMAGE=${BASE_IMAGE} \
+		--build-arg GOPROXY=${GOPROXY} \
+		--build-arg GOPRIVATE=${GOPRIVATE} \
+		-t "${REPOSITORY_ARCH}" \
+		--load \
+		-f Dockerfile \
+		.

 push:
	docker push ${REPOSITORY_ARCH}

-release: binary-image push manifest
+release: build-image push manifest

 release-all:
	-rm -rf ~/.docker/manifests/*
@@ -66,23 +75,6 @@ test:
 stop:
	@docker stop "${BINARY}"

-clean-images: stop
-	-docker rmi "${BINARY}"
-	@for arch in $(ALL_ARCH) ; do \
-		echo Clean image: $$arch ; \
-		make clean-image ARCH=$$arch ; \
-	done
-	-docker rmi "${REPOSITORY_GENERIC}"
-
-clean-image:
-	-docker rmi "${BUILDER}"
-	-docker rmi "${REPOSITORY_ARCH}"
-	-rm -rf ~/.docker/manifests/*
-
-clean:
-	"$(GOCMD)" clean -i
-	-rm -rf reloader-*.tar
-
 apply:
	kubectl apply -f deployments/manifests/ -n temp-reloader

@@ -93,3 +85,4 @@ bump-chart:
	sed -i "s/^version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
	sed -i "s/^appVersion:.*/appVersion: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
	sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
+	sed -i "s/version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
README.md (48 lines changed):

@@ -6,8 +6,6 @@
 [](https://github.com/stakater/reloader/releases/latest)
 [](https://hub.docker.com/r/stakater/reloader/)
 [](https://hub.docker.com/r/stakater/reloader/)
-[](https://microbadger.com/images/stakater/reloader)
-[](https://microbadger.com/images/stakater/reloader)
 [](LICENSE)
 [](http://stakater.com/?utm_source=Reloader&utm_medium=github)

@@ -33,7 +31,8 @@ metadata:
   annotations:
     reloader.stakater.com/auto: "true"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```

 This will automatically discover deploymentconfigs/deployments/daemonsets/statefulsets/rollouts where `foo-configmap` or `foo-secret` is used, either via an environment variable or a volume mount, and will perform a rolling upgrade on the related pods when `foo-configmap` or `foo-secret` is updated.
@@ -88,7 +87,8 @@ metadata:
   annotations:
     configmap.reloader.stakater.com/reload: "foo-configmap"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```

 Use a comma-separated list to define multiple configmaps.

@@ -99,7 +99,8 @@ metadata:
   annotations:
     configmap.reloader.stakater.com/reload: "foo-configmap,bar-configmap,baz-configmap"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```

### Secret

@@ -114,7 +115,8 @@ metadata:
   annotations:
     secret.reloader.stakater.com/reload: "foo-secret"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```

 Use a comma-separated list to define multiple secrets.

@@ -125,7 +127,8 @@ metadata:
   annotations:
     secret.reloader.stakater.com/reload: "foo-secret,bar-secret,baz-secret"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```
### NOTES

@@ -142,6 +145,19 @@ spec:
 - you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
 - you may want to prevent watching certain resources with the `--resources-to-ignore` flag
 - you can configure logging in JSON format with the `--log-format=json` option
+- you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)
+
+## Reload Strategies
+
+Reloader supports multiple "reload" strategies for performing rolling upgrades to resources. The following list describes them:
+
+- **env-vars**: When a tracked `configMap`/`secret` is updated, this strategy attaches a Reloader-specific environment variable to any containers referencing the changed `configMap` or `secret` on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy can be specified with the `--reload-strategy=env-vars` argument. Note: this is the default reload strategy.
+- **annotations**: When a tracked `configMap`/`secret` is updated, this strategy attaches a `reloader.stakater.com/last-reloaded-from` pod template annotation on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy is useful with resource-syncing tools like ArgoCD, since it will not cause those tools to detect configuration drift after a resource is reloaded. Note: since the attached pod template annotation only tracks the last reload source, this strategy will reload any tracked resource should its `configMap` or `secret` be deleted and recreated. This strategy can be specified with the `--reload-strategy=annotations` argument.
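For orientation, here is a minimal sketch of the state the annotations strategy leaves behind after a reload. The annotation key `reloader.stakater.com/last-reloaded-from` comes from the description above; the value shown is an assumption for illustration, not necessarily the exact format Reloader writes:

```yaml
# Hypothetical Deployment state after a reload under --reload-strategy=annotations.
# Only the annotation key is documented above; the value is illustrative.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: foo
  annotations:
    reloader.stakater.com/auto: "true"
spec:
  template:
    metadata:
      annotations:
        reloader.stakater.com/last-reloaded-from: "foo-configmap"  # last reload source
```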
## Deploying to Kubernetes

@@ -219,7 +235,23 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by

 You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart

-You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` to `true` in values.yaml file.
+You can enable scraping of Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in the values.yaml file. Service monitor will be removed in future releases of Reloader in favour of Pod monitor.
+
+**Note:** Reloading of OpenShift (DeploymentConfig) and/or Argo Rollouts has to be enabled explicitly, because it might not always be possible to use it on a cluster with restricted permissions. This can be done by changing the following parameters:
+
+| Parameter      | Description                                                                   | Type    |
+| -------------- | ----------------------------------------------------------------------------- | ------- |
+| isOpenshift    | Enable OpenShift DeploymentConfigs. Valid values are either `true` or `false` | boolean |
+| isArgoRollouts | Enable Argo Rollouts. Valid values are either `true` or `false`               | boolean |
+| reloadOnCreate | Enable reload on create events. Valid values are either `true` or `false`     | boolean |
+
+**ReloadOnCreate**: `reloadOnCreate` controls how Reloader handles secrets being added to the cache for the first time. If `reloadOnCreate` is set to `true`:
+
+* Configmaps/secrets being added to the cache will cause Reloader to perform a rolling update of the associated workload.
+* When applications are deployed for the first time, Reloader will perform a rolling update of the associated workload.
+* If you are running Reloader in HA mode, all workloads will have a rolling update performed when a new leader is elected.
+
+If `reloadOnCreate` is set to `false`:
+
+* Updates to configmaps/secrets that occur while there is no leader will not be picked up by the new leader until a subsequent update of the configmap/secret occurs. In the worst case, the window in which there can be no leader is 15s, as this is the LeaseDuration.
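As a concrete sketch, enabling both behaviours through the chart could look like this values.yaml override (the key names come from the chart in this change set; the replica count is illustrative):

```yaml
# Illustrative values.yaml override: leader-elected HA plus reload-on-create.
reloader:
  enableHA: true        # leader election; also adds leases RBAC and POD_NAME/POD_NAMESPACE env vars
  reloadOnCreate: true  # newly created configmaps/secrets trigger a rolling update
  deployment:
    replicas: 2         # multiple replicas only make sense with enableHA
```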
## Help
Deleted file (file name not shown in the mirrored page; the removed Makefile targets referenced it as build/package/Dockerfile.build):

@@ -1,26 +0,0 @@
-FROM golang:1.15.2-alpine
-LABEL maintainer "Stakater Team"
-
-ARG GOARCH=amd64
-
-RUN apk -v --update \
-    --no-cache \
-    add git build-base
-
-WORKDIR "$GOPATH/src/github.com/stakater/Reloader"
-
-COPY go.mod go.sum ./
-
-RUN go mod download
-
-COPY . .
-
-ENV CGO_ENABLED=0 GOOS=linux GOARCH=$GOARCH
-
-RUN go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
-
-COPY build/package/Dockerfile.run /
-
-# Running this image produces a tarball suitable to be piped into another
-# Docker build command.
-CMD tar -cf - -C / Dockerfile.run Reloader
Deleted file (likely the runtime image the removed Makefile targets referenced as Dockerfile.run):

@@ -1,14 +0,0 @@
-FROM alpine:3.11
-LABEL maintainer "Stakater Team"
-
-RUN apk add --update --no-cache ca-certificates
-
-COPY Reloader /bin/Reloader
-
-# On alpine 'nobody' has uid 65534
-USER 65534
-
-# Port for metrics and probes
-EXPOSE 9090
-
-ENTRYPOINT ["/bin/Reloader"]
deployments/kubernetes/chart/reloader/Chart.yaml:

@@ -3,14 +3,14 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: v0.0.83
-appVersion: v0.0.83
+version: v0.0.124
+appVersion: v0.0.124
 keywords:
   - Reloader
   - kubernetes
 home: https://github.com/stakater/Reloader
 sources:
-  - https://github.com/stakater/IngressMonitorController
+  - https://github.com/stakater/Reloader
 icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
 maintainers:
   - name: Stakater
deployments/kubernetes/chart/reloader/templates/NOTES.txt (likely, based on content):

@@ -1,7 +1,7 @@
 - For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap`. Then add this annotation to main metadata of your `Deployment`
-    configmap.reloader.stakater.com/reload: "foo-configmap"
+    {{ .Values.reloader.custom_annotations.configmap | default "configmap.reloader.stakater.com/reload" }}: "foo-configmap"

-- For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
-    secret.reloader.stakater.com/reload: "foo-secret"
+- For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
+    {{ .Values.reloader.custom_annotations.secret | default "secret.reloader.stakater.com/reload" }}: "foo-secret"

 - After successful installation, your pods will get rolling updates when a change in data of configmap or secret will happen.
deployments/kubernetes/chart/reloader/templates/_helpers.tpl:

@@ -28,6 +28,23 @@ heritage: {{ .Release.Service | quote }}
 app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
 {{- end -}}

+{{/*
+Create pod anti affinity labels
+*/}}
+{{- define "reloader-podAntiAffinity" -}}
+podAntiAffinity:
+  preferredDuringSchedulingIgnoredDuringExecution:
+  - weight: 100
+    podAffinityTerm:
+      labelSelector:
+        matchExpressions:
+        - key: app
+          operator: In
+          values:
+          - {{ template "reloader-fullname" . }}
+      topologyKey: "kubernetes.io/hostname"
+{{- end -}}
+
 {{/*
 Create the name of the service account to use
 */}}
@@ -45,4 +62,4 @@ Create the annotations to support helm3
 {{- define "reloader-helm3.annotations" -}}
 meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
 meta.helm.sh/release-name: {{ .Release.Name | quote }}
-{{- end -}}
+{{- end -}}
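To visualize the new `reloader-podAntiAffinity` helper, this is approximately what it renders once included under the Deployment's `affinity:` key (here `reloader-reloader` stands in for whatever `reloader-fullname` resolves to in a real release):

```yaml
# Approximate rendered output of the reloader-podAntiAffinity helper (names illustrative).
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            matchExpressions:
              - key: app
                operator: In
                values:
                  - reloader-reloader
          topologyKey: "kubernetes.io/hostname"
```

The preferred (soft) form spreads HA replicas across nodes by hostname without making pods unschedulable on a single-node cluster.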
deployments/kubernetes/chart/reloader/templates/clusterrole.yaml (likely, given file ordering; an identical set of hunks appears again below for role.yaml):

@@ -32,7 +32,7 @@ rules:
     - list
     - get
     - watch
-{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
+{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
 - apiGroups:
     - "apps.openshift.io"
     - ""
@@ -43,6 +43,18 @@ rules:
     - get
     - update
     - patch
 {{- end }}
+{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
+- apiGroups:
+    - "argoproj.io"
+    - ""
+  resources:
+    - rollouts
+  verbs:
+    - list
+    - get
+    - update
+    - patch
+{{- end }}
 - apiGroups:
     - "apps"
@@ -65,4 +77,21 @@ rules:
     - get
     - update
     - patch
+{{- if .Values.reloader.enableHA }}
+- apiGroups:
+    - "coordination.k8s.io"
+  resources:
+    - leases
+  verbs:
+    - create
+    - get
+    - update
+{{- end}}
+- apiGroups:
+    - ""
+  resources:
+    - events
+  verbs:
+    - create
+    - patch
 {{- end }}
deployments/kubernetes/chart/reloader/templates/deployment.yaml:

@@ -15,8 +15,13 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}
   namespace: {{ .Release.Namespace }}
 spec:
+{{- if not (.Values.reloader.enableHA) }}
   replicas: 1
+{{- else }}
+  replicas: {{ .Values.reloader.deployment.replicas }}
+{{- end}}
   revisionHistoryLimit: 2
   selector:
     matchLabels:
@@ -44,19 +49,25 @@ spec:
       nodeSelector:
 {{ toYaml .Values.reloader.deployment.nodeSelector | indent 8 }}
 {{- end }}
-{{- if .Values.reloader.deployment.affinity }}
+{{- if or (.Values.reloader.deployment.affinity) (.Values.reloader.enableHA) }}
       affinity:
+{{- if .Values.reloader.deployment.affinity }}
 {{ toYaml .Values.reloader.deployment.affinity | indent 8 }}
+{{- end}}
+{{ include "reloader-podAntiAffinity" . | indent 8 }}
 {{- end }}
 {{- if .Values.reloader.deployment.tolerations }}
       tolerations:
 {{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
 {{- end }}
 {{- if .Values.reloader.deployment.priorityClassName }}
       priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
 {{- end }}
       containers:
       - image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
         imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
         name: {{ template "reloader-fullname" . }}
-{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) }}
+{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) (.Values.reloader.enableHA)}}
         env:
 {{- range $name, $value := .Values.reloader.deployment.env.open }}
 {{- if not (empty $value) }}
@@ -88,26 +99,50 @@ spec:
             fieldRef:
               fieldPath: metadata.namespace
 {{- end }}
+{{- if .Values.reloader.enableHA }}
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+{{- end }}
 {{- end }}

         ports:
         - name: http
+          containerPort: 9091
+        - name: metrics
           containerPort: 9090
         livenessProbe:
           httpGet:
-            path: /metrics
+            path: /live
             port: http
+          timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }}
+          failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}
+          periodSeconds: {{ .Values.reloader.deployment.livenessProbe.periodSeconds | default "10" }}
+          successThreshold: {{ .Values.reloader.deployment.livenessProbe.successThreshold | default "1" }}
         readinessProbe:
           httpGet:
             path: /metrics
-            port: http
+            port: metrics
+          timeoutSeconds: {{ .Values.reloader.deployment.readinessProbe.timeoutSeconds | default "5" }}
+          failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }}
+          periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}
+          successThreshold: {{ .Values.reloader.deployment.readinessProbe.successThreshold | default "1" }}

+{{- with .Values.reloader.deployment.containerSecurityContext }}
+        securityContext: {{ toYaml . | nindent 10 }}
+{{- end }}

 {{- if eq .Values.reloader.readOnlyRootFileSystem true }}
         volumeMounts:
           - mountPath: /tmp/
             name: tmp-volume
 {{- end }}
-{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) }}
+{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA)}}
         args:
 {{- if .Values.reloader.logFormat }}
           - "--log-format={{ .Values.reloader.logFormat }}"
@@ -144,6 +179,18 @@ spec:
           - "{{ .Values.reloader.custom_annotations.match }}"
 {{- end }}
 {{- end }}
+{{- if eq .Values.reloader.isArgoRollouts true }}
+          - "--is-Argo-Rollouts={{ .Values.reloader.isArgoRollouts }}"
+{{- end }}
+{{- if eq .Values.reloader.reloadOnCreate true }}
+          - "--reload-on-create={{ .Values.reloader.reloadOnCreate }}"
+{{- end }}
+{{- if ne .Values.reloader.reloadStrategy "default" }}
+          - "--reload-strategy={{ .Values.reloader.reloadStrategy }}"
+{{- end }}
+{{- if or (gt .Values.reloader.deployment.replicas 1.0) (.Values.reloader.enableHA) }}
+          - "--enable-ha=true"
+{{- end}}
 {{- end }}
 {{- if .Values.reloader.deployment.resources }}
         resources:
@@ -153,6 +200,9 @@ spec:
         securityContext: {{ toYaml .Values.reloader.deployment.securityContext | nindent 8 }}
 {{- end }}
       serviceAccountName: {{ template "reloader-serviceAccountName" . }}
+{{- if hasKey .Values.reloader.deployment "automountServiceAccountToken" }}
+      automountServiceAccountToken: {{ .Values.reloader.deployment.automountServiceAccountToken }}
+{{- end }}
 {{- if eq .Values.reloader.readOnlyRootFileSystem true }}
       volumes:
         - emptyDir: {}
New file (likely deployments/kubernetes/chart/reloader/templates/poddisruptionbudget.yaml):

@@ -0,0 +1,11 @@
+{{- if .Values.reloader.podDisruptionBudget.enabled }}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ template "reloader-fullname" . }}
+spec:
+  minAvailable: {{ .Values.reloader.podDisruptionBudget.minAvailable }}
+  selector:
+    matchLabels:
+      app: {{ template "reloader-fullname" . }}
+{{- end }}
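A values override along these lines would enable the budget (a sketch; the specific numbers are illustrative choices):

```yaml
# Illustrative values.yaml override enabling the new PodDisruptionBudget,
# sensible together with enableHA and multiple replicas.
reloader:
  enableHA: true
  deployment:
    replicas: 2
  podDisruptionBudget:
    enabled: true
    minAvailable: 1  # keep at least one Reloader pod during voluntary disruptions
```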
New file (likely deployments/kubernetes/chart/reloader/templates/podmonitor.yaml):

@@ -0,0 +1,31 @@
+{{- if ( .Values.reloader.podMonitor.enabled ) }}
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  labels:
+{{ include "reloader-labels.chart" . | indent 4 }}
+{{- if .Values.reloader.podMonitor.labels }}
+{{ toYaml .Values.reloader.podMonitor.labels | indent 4}}
+{{- end }}
+  name: {{ template "reloader-fullname" . }}
+{{- if .Values.reloader.podMonitor.namespace }}
+  namespace: {{ .Values.reloader.podMonitor.namespace }}
+{{- end }}
+spec:
+  podMetricsEndpoints:
+  - port: http
+    path: "/metrics"
+{{- if .Values.reloader.podMonitor.interval }}
+    interval: {{ .Values.reloader.podMonitor.interval }}
+{{- end }}
+{{- if .Values.reloader.podMonitor.timeout }}
+    scrapeTimeout: {{ .Values.reloader.podMonitor.timeout }}
+{{- end }}
+  jobLabel: {{ template "reloader-fullname" . }}
+  namespaceSelector:
+    matchNames:
+    - {{ .Release.Namespace }}
+  selector:
+    matchLabels:
+{{ include "reloader-labels.chart" . | nindent 6 }}
+{{- end }}
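A values override enabling the monitor might look like the following sketch (interval, timeout, and the extra label are illustrative; the keys come from the template above):

```yaml
# Illustrative values.yaml override for the new PodMonitor template.
reloader:
  podMonitor:
    enabled: true
    # namespace: monitoring          # where the Prometheus Operator looks for PodMonitors
    interval: 30s                    # scrape frequency
    timeout: 10s                     # per-scrape timeout
    labels:
      release: kube-prometheus-stack # hypothetical scrape-selection label
```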
deployments/kubernetes/chart/reloader/templates/role.yaml (likely; same hunks as clusterrole.yaml above):

@@ -32,7 +32,7 @@ rules:
     - list
     - get
     - watch
-{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
+{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
 - apiGroups:
     - "apps.openshift.io"
     - ""
@@ -43,6 +43,18 @@ rules:
     - get
     - update
     - patch
 {{- end }}
+{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
+- apiGroups:
+    - "argoproj.io"
+    - ""
+  resources:
+    - rollouts
+  verbs:
+    - list
+    - get
+    - update
+    - patch
+{{- end }}
 - apiGroups:
     - "apps"
@@ -65,4 +77,21 @@ rules:
     - get
     - update
     - patch
+{{- if .Values.reloader.enableHA }}
+- apiGroups:
+    - "coordination.k8s.io"
+  resources:
+    - leases
+  verbs:
+    - create
+    - get
+    - update
+{{- end}}
+- apiGroups:
+    - ""
+  resources:
+    - events
+  verbs:
+    - create
+    - patch
 {{- end }}
deployments/kubernetes/chart/reloader/templates/secret.yaml (new file, 21 lines):

@@ -0,0 +1,21 @@
+{{- if .Values.reloader.deployment.env.secret -}}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "reloader-fullname" . }}
+  namespace: {{ .Release.Namespace }}
+type: Opaque
+data:
+{{ if .Values.reloader.deployment.env.secret.ALERT_ON_RELOAD -}}
+  ALERT_ON_RELOAD: {{ .Values.reloader.deployment.env.secret.ALERT_ON_RELOAD | b64enc | quote }}
+{{ end }}
+{{- if .Values.reloader.deployment.env.secret.ALERT_SINK -}}
+  ALERT_SINK: {{ .Values.reloader.deployment.env.secret.ALERT_SINK | b64enc | quote }}
+{{ end }}
+{{- if .Values.reloader.deployment.env.secret.ALERT_WEBHOOK_URL -}}
+  ALERT_WEBHOOK_URL: {{ .Values.reloader.deployment.env.secret.ALERT_WEBHOOK_URL | b64enc | quote }}
+{{ end }}
+{{- if .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO -}}
+  ALERT_ADDITIONAL_INFO: {{ .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO | b64enc | quote }}
+{{ end }}
+{{ end }}
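For illustration, with `env.secret.ALERT_ON_RELOAD: "true"` set in values, the template above renders roughly the following (the release name is hypothetical; `b64enc` base64-encodes each value):

```yaml
# Approximate rendered output of secret.yaml for a release whose
# fullname resolves to "reloader-reloader" (illustrative).
apiVersion: v1
kind: Secret
metadata:
  name: reloader-reloader
  namespace: default
type: Opaque
data:
  ALERT_ON_RELOAD: "dHJ1ZQ=="  # base64 of "true"
```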
deployments/kubernetes/chart/reloader/templates/service.yaml (likely, based on content):

@@ -13,6 +13,7 @@ metadata:
 {{ toYaml .Values.reloader.service.labels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}
+  namespace: {{ .Release.Namespace }}
 spec:
   selector:
 {{- if .Values.reloader.deployment.labels }}
deployments/kubernetes/chart/reloader/templates/serviceaccount.yaml (likely, based on content):

@@ -4,6 +4,9 @@ kind: ServiceAccount
 {{- if .Values.global.imagePullSecrets }}
 imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
 {{- end }}
+{{- if hasKey .Values.reloader.serviceAccount "automountServiceAccountToken" }}
+automountServiceAccountToken: {{ .Values.reloader.serviceAccount.automountServiceAccountToken }}
+{{- end }}
 metadata:
   annotations:
 {{ include "reloader-helm3.annotations" . | indent 4 }}
@@ -19,4 +22,5 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
 {{- end }}
deployments/kubernetes/chart/reloader/values.schema.json (new file, 19 lines):

@@ -0,0 +1,19 @@
+{
+  "$schema": "http://json-schema.org/schema#",
+  "type": "object",
+  "properties": {
+    "reloader": {
+      "type": "object",
+      "properties": {
+        "reloadStrategy": {
+          "type": "string",
+          "enum": [
+            "default",
+            "env-vars",
+            "annotations"
+          ]
+        }
+      }
+    }
+  }
+}
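The schema constrains `reloader.reloadStrategy` to the three enum values, so Helm rejects anything else during install or lint. A conforming override is simply:

```yaml
# Passes the values.schema.json check; a string outside the enum
# (for example "env_vars") would fail Helm's schema validation.
reloader:
  reloadStrategy: annotations
```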
deployments/kubernetes/chart/reloader/values.yaml:

@@ -9,18 +9,25 @@ kubernetes:
   host: https://kubernetes.default

 reloader:
+  isArgoRollouts: false
   isOpenshift: false
   ignoreSecrets: false
   ignoreConfigMaps: false
+  reloadOnCreate: false
+  reloadStrategy: default # Set to default, env-vars or annotations
   ignoreNamespaces: "" # Comma separated list of namespaces to ignore
   logFormat: "" #json
   watchGlobally: true
+  # Set to true to enable leadership election allowing you to run multiple replicas
+  enableHA: false
   # Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
   readOnlyRootFileSystem: false
   legacy:
     rbac: false
   matchLabels: {}
   deployment:
+    # If you wish to run multiple replicas set reloader.enableHA = true
     replicas: 1
     nodeSelector:
       # cloud.google.com/gke-nodepool: default-pool

@@ -39,6 +46,13 @@ reloader:
       runAsNonRoot: true
       runAsUser: 65534

+    containerSecurityContext: {}
+      # capabilities:
+      #   drop:
+      #     - ALL
+      # allowPrivilegeEscalation: false
+      # readOnlyRootFilesystem: true
+
     # A list of tolerations to be applied to the Deployment.
     # Example:
     # tolerations:

@@ -51,10 +65,10 @@ reloader:
     labels:
       provider: stakater
       group: com.stakater.platform
-      version: v0.0.77
+      version: v0.0.124
     image:
       name: stakater/reloader
-      tag: v0.0.83
+      tag: v0.0.124
       pullPolicy: IfNotPresent
     # Support for extra environment variables.
     env:

@@ -62,9 +76,25 @@ reloader:
       open:
       # secret supports Key value pair as environment variables. It gets the values based on keys from default reloader secret if any.
       secret:
+        # ALERT_ON_RELOAD: <"true"|"false">
+        # ALERT_SINK: <"slack"> # By default it will be a raw text based webhook
+        # ALERT_WEBHOOK_URL: <"webhook_url">
+        # ALERT_ADDITIONAL_INFO: <"Additional Info like Cluster Name if needed">
       # field supports Key value pair as environment variables. It gets the values from other fields of pod.
       field:

+    # Liveness and readiness probe timeout values.
+    livenessProbe: {}
+      # timeoutSeconds: 5
+      # failureThreshold: 5
+      # periodSeconds: 10
+      # successThreshold: 1
+    readinessProbe: {}
+      # timeoutSeconds: 15
+      # failureThreshold: 5
+      # periodSeconds: 10
+      # successThreshold: 1
+
     # Specify resource requests/limits for the deployment.
     # Example:
     # resources:

@@ -77,6 +107,7 @@ reloader:
     resources: {}
     pod:
       annotations: {}
+    priorityClassName: ""

     service: {}
       # labels: {}

@@ -101,8 +132,10 @@ reloader:
       # configmap: "my.company.com/configmap"
       # secret: "my.company.com/secret"
   custom_annotations: {}

   serviceMonitor:
-    # enabling this requires service to be enabled as well, or no endpoints will be found
+    # Deprecated: Service monitor will be removed in future releases of reloader in favour of Pod monitor
+    # Enabling this requires service to be enabled as well, or no endpoints will be found
     enabled: false
     # Set the namespace the ServiceMonitor should be deployed
     # namespace: monitoring

@@ -112,4 +145,19 @@ reloader:
     # labels:
     # Set timeout for scrape
     # timeout: 10s

+  podMonitor:
+    enabled: false
+    # Set the namespace the podMonitor should be deployed
+    # namespace: monitoring
+    # Set how frequently Prometheus should scrape
+    # interval: 30s
+    # Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
+    # labels:
+    # Set timeout for scrape
+    # timeout: 10s
+
+  podDisruptionBudget:
+    enabled: false
+    # Set the minimum available replicas
+    # minAvailable: 1
deployments/kubernetes/manifests/clusterrole.yaml (likely; this and the following manifests were regenerated via the new Helm Template CI step):

@@ -1,7 +1,7 @@
 ---
 # Source: reloader/templates/clusterrole.yaml
-apiVersion: rbac.authorization.k8s.io/v1
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
   annotations:
@@ -9,10 +9,10 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.124"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
   name: reloader-reloader-role
   namespace: default
 rules:
@@ -46,4 +46,10 @@ rules:
     - get
     - update
     - patch
+
+- apiGroups:
+    - ""
+  resources:
+    - events
+  verbs:
+    - create
+    - patch
deployments/kubernetes/manifests/clusterrolebinding.yaml (likely):

@@ -1,7 +1,7 @@
 ---
 # Source: reloader/templates/clusterrolebinding.yaml
-apiVersion: rbac.authorization.k8s.io/v1
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
   annotations:
@@ -9,10 +9,10 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.124"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
   name: reloader-reloader-role-binding
   namespace: default
 roleRef:
@@ -23,4 +23,3 @@ subjects:
   - kind: ServiceAccount
     name: reloader-reloader
     namespace: default
-
deployments/kubernetes/manifests/deployment.yaml (likely):

@@ -8,15 +8,15 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.124"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
     group: com.stakater.platform
     provider: stakater
-    version: v0.0.77
+    version: v0.0.124
   name: reloader-reloader
   namespace: default
 spec:
   replicas: 1
   revisionHistoryLimit: 2
@@ -28,34 +28,41 @@ spec:
     metadata:
       labels:
         app: reloader-reloader
-        chart: "reloader-v0.0.77"
+        chart: "reloader-v0.0.124"
         release: "reloader"
-        heritage: "Tiller"
-        app.kubernetes.io/managed-by: "Tiller"
+        heritage: "Helm"
+        app.kubernetes.io/managed-by: "Helm"
         group: com.stakater.platform
         provider: stakater
-        version: v0.0.77
+        version: v0.0.124
     spec:
       containers:
-      - image: "stakater/reloader:v0.0.77"
+      - image: "stakater/reloader:v0.0.124"
        imagePullPolicy: IfNotPresent
        name: reloader-reloader

        ports:
        - name: http
+          containerPort: 9091
+        - name: metrics
          containerPort: 9090
        livenessProbe:
          httpGet:
-            path: /metrics
+            path: /live
            port: http
          timeoutSeconds: 5
          failureThreshold: 5
          periodSeconds: 10
          successThreshold: 1
        readinessProbe:
          httpGet:
            path: /metrics
-            port: http
+            port: metrics
          timeoutSeconds: 5
          failureThreshold: 5
          periodSeconds: 10
          successThreshold: 1
        securityContext:
          runAsNonRoot: true
          runAsUser: 65534

       serviceAccountName: reloader-reloader
deployments/kubernetes/manifests/podmonitor.yaml (new file, 3 lines):

@@ -0,0 +1,3 @@
+---
+# Source: reloader/templates/podmonitor.yaml
+
deployments/kubernetes/manifests/serviceaccount.yaml (likely):

@@ -1,6 +1,5 @@
 ---
 # Source: reloader/templates/serviceaccount.yaml
-
 apiVersion: v1
 kind: ServiceAccount
 metadata:
@@ -9,9 +8,9 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.124"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
   name: reloader-reloader
-
   namespace: default
deployments/kubernetes/reloader.yaml (likely, the combined manifest; the ServiceAccount moves from the bottom of the file to the top):

@@ -1,7 +1,23 @@
 ---
+# Source: reloader/templates/serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
+  labels:
+    app: reloader-reloader
+    chart: "reloader-v0.0.124"
+    release: "reloader"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
+  name: reloader-reloader
+  namespace: default
+---
 # Source: reloader/templates/clusterrole.yaml
-apiVersion: rbac.authorization.k8s.io/v1
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
   annotations:
@@ -9,10 +25,10 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.124"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
   name: reloader-reloader-role
   namespace: default
 rules:
@@ -46,11 +62,17 @@ rules:
     - get
     - update
     - patch
+
+- apiGroups:
+    - ""
+  resources:
+    - events
+  verbs:
+    - create
+    - patch
 ---
 # Source: reloader/templates/clusterrolebinding.yaml
-apiVersion: rbac.authorization.k8s.io/v1
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
   annotations:
@@ -58,10 +80,10 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.124"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
   name: reloader-reloader-role-binding
   namespace: default
 roleRef:
@@ -72,7 +94,6 @@ subjects:
   - kind: ServiceAccount
     name: reloader-reloader
     namespace: default
-
 ---
 # Source: reloader/templates/deployment.yaml
 apiVersion: apps/v1
@@ -83,15 +104,15 @@ metadata:
     meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.77"
+    chart: "reloader-v0.0.124"
     release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
+    heritage: "Helm"
+    app.kubernetes.io/managed-by: "Helm"
     group: com.stakater.platform
     provider: stakater
-    version: v0.0.77
+    version: v0.0.124
   name: reloader-reloader
   namespace: default
 spec:
   replicas: 1
   revisionHistoryLimit: 2
@@ -103,67 +124,41 @@ spec:
     metadata:
       labels:
         app: reloader-reloader
-        chart: "reloader-v0.0.77"
+        chart: "reloader-v0.0.124"
         release: "reloader"
-        heritage: "Tiller"
-        app.kubernetes.io/managed-by: "Tiller"
+        heritage: "Helm"
+        app.kubernetes.io/managed-by: "Helm"
         group: com.stakater.platform
         provider: stakater
-        version: v0.0.77
+        version: v0.0.124
     spec:
       containers:
-      - image: "stakater/reloader:v0.0.77"
+      - image: "stakater/reloader:v0.0.124"
        imagePullPolicy: IfNotPresent
        name: reloader-reloader

        ports:
        - name: http
+          containerPort: 9091
+        - name: metrics
          containerPort: 9090
        livenessProbe:
          httpGet:
-            path: /metrics
+            path: /live
            port: http
          timeoutSeconds: 5
          failureThreshold: 5
          periodSeconds: 10
          successThreshold: 1
        readinessProbe:
          httpGet:
            path: /metrics
-            port: http
+            port: metrics
          timeoutSeconds: 5
          failureThreshold: 5
          periodSeconds: 10
          successThreshold: 1
        securityContext:
          runAsNonRoot: true
          runAsUser: 65534

       serviceAccountName: reloader-reloader

 ---
 # Source: reloader/templates/role.yaml

 ---
 # Source: reloader/templates/rolebinding.yaml

 ---
 # Source: reloader/templates/service.yaml

 ---
 # Source: reloader/templates/serviceaccount.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  annotations:
-    meta.helm.sh/release-namespace: "default"
-    meta.helm.sh/release-name: "reloader"
-  labels:
-    app: reloader-reloader
-    chart: "reloader-v0.0.77"
-    release: "reloader"
-    heritage: "Tiller"
-    app.kubernetes.io/managed-by: "Tiller"
-  name: reloader-reloader
 ---
 # Source: reloader/templates/servicemonitor.yaml
A second values.yaml (path not shown in the mirrored page):

@@ -9,9 +9,12 @@ kubernetes:
   host: https://kubernetes.default

 reloader:
+  isArgoRollouts: false
   isOpenshift: false
   ignoreSecrets: false
   ignoreConfigMaps: false
+  reloadOnCreate: false
+  reloadStrategy: default # Set to default, env-vars or annotations
   ignoreNamespaces: "" # Comma separated list of namespaces to ignore
   logFormat: "" #json
   watchGlobally: true
@@ -21,6 +24,7 @@ reloader:
     rbac: false
   matchLabels: {}
   deployment:
+    replicas: 1
     nodeSelector:
       # cloud.google.com/gke-nodepool: default-pool

@@ -39,6 +43,13 @@ reloader:
       runAsNonRoot: true
       runAsUser: 65534

+    containerSecurityContext: {}
+      # capabilities:
+      #   drop:
+      #     - ALL
+      # allowPrivilegeEscalation: false
+      # readOnlyRootFilesystem: true
+
     # A list of tolerations to be applied to the Deployment.
     # Example:
     # tolerations:

@@ -113,3 +124,15 @@ reloader:
     # Set timeout for scrape
     # timeout: 10s

+  podMonitor:
+    # enabling this requires service to be enabled as well, or no endpoints will be found
+    enabled: false
+    # Set the namespace the podMonitor should be deployed
+    # namespace: monitoring
+    # Set how frequently Prometheus should scrape
+    # interval: 30s
+    # Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
+    # labels:
+    # Set timeout for scrape
+    # timeout: 10s
docs/Alerting.md (new file, 12 lines):

+# Alerting on Reload
+
+Reloader can alert when it triggers a rolling upgrade on Deployments or StatefulSets. A webhook notification alert is sent to the configured webhook server with all the required information.
+
+#### Enabling the feature
+
+To enable this feature, update the `reloader.env.secret` section of values.yaml with the information needed for the alert:
+
+    ALERT_ON_RELOAD: [ true/false ] Default: false
+    ALERT_SINK: [ slack/webhook ] Default: webhook
+    ALERT_WEBHOOK_URL: Required if ALERT_ON_RELOAD is true
+    ALERT_ADDITIONAL_INFO: Any additional information to be added to alert
+
+#### Slack incoming-webhook creation docs
+
+https://api.slack.com/messaging/webhooks
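Concretely, a values override wiring these up might look like the following sketch (the webhook URL and extra context are placeholders; the chart's secret.yaml template above base64-encodes these into a Secret exposed to Reloader as environment variables):

```yaml
# Illustrative values.yaml override enabling reload alerts.
reloader:
  deployment:
    env:
      secret:
        ALERT_ON_RELOAD: "true"
        ALERT_SINK: "slack"
        ALERT_WEBHOOK_URL: "https://hooks.slack.com/services/T000/B000/XXXX"  # placeholder
        ALERT_ADDITIONAL_INFO: "cluster: prod-eu-west"  # hypothetical extra context
```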
go.mod (112 lines changed):

@@ -1,27 +1,99 @@
 module github.com/stakater/Reloader

-go 1.15
+go 1.18

 require (
-	github.com/argoproj/argo-rollouts v0.7.2
-	github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/onsi/ginkgo v1.10.2 // indirect
-	github.com/onsi/gomega v1.7.0 // indirect
-	github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible
-	github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372
-	github.com/prometheus/client_golang v1.4.1
-	github.com/sirupsen/logrus v1.4.2
-	github.com/spf13/cobra v0.0.0-20160722081547-f62e98d28ab7
-	k8s.io/api v0.0.0-20190918155943-95b840bb6a1f
-	k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8
-	k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90
+	github.com/argoproj/argo-rollouts v1.2.1
+	github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
+	github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
+	github.com/parnurzeal/gorequest v0.2.16
+	github.com/prometheus/client_golang v1.12.2
+	github.com/sirupsen/logrus v1.8.1
+	github.com/spf13/cobra v1.5.0
+	k8s.io/api v0.24.2
+	k8s.io/apimachinery v0.24.2
+	k8s.io/client-go v0.24.2
+	k8s.io/kubectl v0.23.1
 )

-replace (
-	github.com/openshift/api => github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible // prebase-1.16
-	github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372 // prebase-1.16
-	k8s.io/api => k8s.io/api v0.0.0-20191004120104-195af9ec3521 // release-1.16
-	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8 // kubernetes-1.16.0
-	k8s.io/client-go => k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 // kubernetes-1.16.0
+require (
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/emicklei/go-restful/v3 v3.8.0 // indirect
+	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-openapi/jsonpointer v0.19.5 // indirect
+	github.com/go-openapi/jsonreference v0.20.0 // indirect
+	github.com/go-openapi/swag v0.21.1 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/google/gnostic v0.6.9 // indirect
+	github.com/google/go-cmp v0.5.8 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/imdario/mergo v0.3.13 // indirect
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/common v0.36.0 // indirect
+	github.com/prometheus/procfs v0.7.3 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	golang.org/x/net v0.0.0-20220708220712-1185a9018129 // indirect
+	golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 // indirect
+	golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e // indirect
+	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
+	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/klog/v2 v2.60.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 // indirect
+	k8s.io/utils v0.0.0-20220706174534-f6158b442e7c // indirect
+	moul.io/http2curl v1.0.0 // indirect
+	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+	sigs.k8s.io/yaml v1.3.0 // indirect
+)
+
+// Replacements for argo-rollouts
+replace (
+	github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127
+	github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.16.0
+	k8s.io/api v0.0.0 => k8s.io/api v0.24.2
+	k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.24.2
+	k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.21.0-alpha.0
+	k8s.io/apiserver v0.0.0 => k8s.io/apiserver v0.24.2
+	k8s.io/cli-runtime v0.0.0 => k8s.io/cli-runtime v0.24.2
+	k8s.io/client-go v0.0.0 => k8s.io/client-go v0.24.2
+	k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.24.2
+	k8s.io/cluster-bootstrap v0.0.0 => k8s.io/cluster-bootstrap v0.24.2
+	k8s.io/code-generator v0.0.0 => k8s.io/code-generator v0.20.5-rc.0
+	k8s.io/component-base v0.0.0 => k8s.io/component-base v0.24.2
+	k8s.io/component-helpers v0.0.0 => k8s.io/component-helpers v0.24.2
+	k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.24.2
+	k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.20.5-rc.0
+	k8s.io/csi-translation-lib v0.0.0 => k8s.io/csi-translation-lib v0.24.2
+	k8s.io/kube-aggregator v0.0.0 => k8s.io/kube-aggregator v0.24.2
+	k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.24.2
+	k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.24.2
+	k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.24.2
+	k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.24.2
+	k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.2
+	k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.24.2
+	k8s.io/metrics v0.0.0 => k8s.io/metrics v0.24.2
+	k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.20.5-rc.0
+	k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.24.2
+	k8s.io/sample-cli-plugin v0.0.0 => k8s.io/sample-cli-plugin v0.24.2
+	k8s.io/sample-controller v0.0.0 => k8s.io/sample-controller v0.24.2
 )
94 internal/pkg/alerts/alert.go Normal file
@@ -0,0 +1,94 @@
package alert

import (
	"fmt"
	"os"
	"strings"

	"github.com/parnurzeal/gorequest"
	"github.com/sirupsen/logrus"
)

// function to send alert msg to webhook service
func SendWebhookAlert(msg string) {
	webhook_url, ok := os.LookupEnv("ALERT_WEBHOOK_URL")
	if !ok {
		logrus.Error("ALERT_WEBHOOK_URL env variable not provided")
		return
	}
	webhook_url = strings.TrimSpace(webhook_url)
	alert_sink := os.Getenv("ALERT_SINK")
	alert_sink = strings.ToLower(strings.TrimSpace(alert_sink))

	// Provision to add Proxy to reach webhook server if required
	webhook_proxy := os.Getenv("ALERT_WEBHOOK_PROXY")
	webhook_proxy = strings.TrimSpace(webhook_proxy)

	// Provision to add Additional information in the alert. e.g ClusterName
	alert_additional_info, ok := os.LookupEnv("ALERT_ADDITIONAL_INFO")
	if ok {
		alert_additional_info = strings.TrimSpace(alert_additional_info)
		msg = fmt.Sprintf("%s : %s", alert_additional_info, msg)
	}

	if alert_sink == "slack" {
		sendSlackAlert(webhook_url, webhook_proxy, msg)
	} else {
		msg = strings.Replace(msg, "*", "", -1)
		sendRawWebhookAlert(webhook_url, webhook_proxy, msg)
	}
}

// function to handle server redirection
func redirectPolicy(req gorequest.Request, via []gorequest.Request) error {
	return fmt.Errorf("incorrect token (redirection)")
}

// function to send alert to slack
func sendSlackAlert(webhookUrl string, proxy string, msg string) []error {
	attachment := Attachment{
		Text:       msg,
		Color:      "good",
		AuthorName: "Reloader",
	}

	payload := WebhookMessage{
		Attachments: []Attachment{attachment},
	}

	request := gorequest.New().Proxy(proxy)
	resp, _, err := request.
		Post(webhookUrl).
		RedirectPolicy(redirectPolicy).
		Send(payload).
		End()

	if err != nil {
		return err
	}
	if resp.StatusCode >= 400 {
		return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
	}

	return nil
}

// function to send alert to webhook service as text
func sendRawWebhookAlert(webhookUrl string, proxy string, msg string) []error {
	request := gorequest.New().Proxy(proxy)
	resp, _, err := request.
		Post(webhookUrl).
		Type("text").
		RedirectPolicy(redirectPolicy).
		Send(msg).
		End()

	if err != nil {
		return err
	}
	if resp.StatusCode >= 400 {
		return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
	}

	return nil
}
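Taken together, the hook above is configured entirely through environment variables. The following is a minimal, hypothetical sketch of how it could be driven; the webhook URL is a placeholder, and internal/ packages are only importable from inside this module:

	package main

	import (
		"os"

		alert "github.com/stakater/Reloader/internal/pkg/alerts"
	)

	func main() {
		// Required: without ALERT_WEBHOOK_URL, SendWebhookAlert logs an error and returns.
		os.Setenv("ALERT_WEBHOOK_URL", "https://hooks.example.com/services/T000/B000/XXXX")
		// "slack" selects the attachment payload; any other value sends plain
		// text with the markdown asterisks stripped.
		os.Setenv("ALERT_SINK", "slack")
		// Optional prefix attached to every message, e.g. to identify the cluster.
		os.Setenv("ALERT_ADDITIONAL_INFO", "cluster: prod")

		alert.SendWebhookAlert("Reloader detected changes in *my-config* of type *CONFIGMAP* in namespace *default*")
	}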
61 internal/pkg/alerts/slack_alert.go Normal file
@@ -0,0 +1,61 @@
package alert

type WebhookMessage struct {
	Username        string       `json:"username,omitempty"`
	IconEmoji       string       `json:"icon_emoji,omitempty"`
	IconURL         string       `json:"icon_url,omitempty"`
	Channel         string       `json:"channel,omitempty"`
	ThreadTimestamp string       `json:"thread_ts,omitempty"`
	Text            string       `json:"text,omitempty"`
	Attachments     []Attachment `json:"attachments,omitempty"`
	Parse           string       `json:"parse,omitempty"`
	ResponseType    string       `json:"response_type,omitempty"`
	ReplaceOriginal bool         `json:"replace_original,omitempty"`
	DeleteOriginal  bool         `json:"delete_original,omitempty"`
	ReplyBroadcast  bool         `json:"reply_broadcast,omitempty"`
}

type Attachment struct {
	Color    string `json:"color,omitempty"`
	Fallback string `json:"fallback,omitempty"`

	CallbackID string `json:"callback_id,omitempty"`
	ID         int    `json:"id,omitempty"`

	AuthorID      string `json:"author_id,omitempty"`
	AuthorName    string `json:"author_name,omitempty"`
	AuthorSubname string `json:"author_subname,omitempty"`
	AuthorLink    string `json:"author_link,omitempty"`
	AuthorIcon    string `json:"author_icon,omitempty"`

	Title     string `json:"title,omitempty"`
	TitleLink string `json:"title_link,omitempty"`
	Pretext   string `json:"pretext,omitempty"`
	Text      string `json:"text,omitempty"`

	ImageURL string `json:"image_url,omitempty"`
	ThumbURL string `json:"thumb_url,omitempty"`

	ServiceName string `json:"service_name,omitempty"`
	ServiceIcon string `json:"service_icon,omitempty"`
	FromURL     string `json:"from_url,omitempty"`
	OriginalURL string `json:"original_url,omitempty"`

	MarkdownIn []string `json:"mrkdwn_in,omitempty"`

	Footer     string `json:"footer,omitempty"`
	FooterIcon string `json:"footer_icon,omitempty"`
}

type Field struct {
	Title string `json:"title"`
	Value string `json:"value"`
	Short bool   `json:"short"`
}

type Action struct {
	Type  string `json:"type"`
	Text  string `json:"text"`
	Url   string `json:"url"`
	Style string `json:"style"`
}
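For reference, marshalling the same attachment that sendSlackAlert builds shows the webhook JSON these types produce; a small illustrative sketch:

	package main

	import (
		"encoding/json"
		"fmt"

		alert "github.com/stakater/Reloader/internal/pkg/alerts"
	)

	func main() {
		payload := alert.WebhookMessage{
			Attachments: []alert.Attachment{{
				Text:       "Reloader reloaded *my-deployment*",
				Color:      "good",
				AuthorName: "Reloader",
			}},
		}
		b, _ := json.Marshal(payload)
		fmt.Println(string(b))
		// {"attachments":[{"color":"good","author_name":"Reloader","text":"Reloader reloaded *my-deployment*"}]}
	}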
@@ -1,39 +1,41 @@
package callbacks

import (
	"context"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
	openshiftv1 "github.com/openshift/api/apps/v1"
)

//ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kube.Clients, string) []interface{}
// ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kube.Clients, string) []runtime.Object

//ContainersFunc is a generic func to return containers
type ContainersFunc func(interface{}) []v1.Container
// ContainersFunc is a generic func to return containers
type ContainersFunc func(runtime.Object) []v1.Container

//InitContainersFunc is a generic func to return containers
type InitContainersFunc func(interface{}) []v1.Container
// InitContainersFunc is a generic func to return containers
type InitContainersFunc func(runtime.Object) []v1.Container

//VolumesFunc is a generic func to return volumes
type VolumesFunc func(interface{}) []v1.Volume
// VolumesFunc is a generic func to return volumes
type VolumesFunc func(runtime.Object) []v1.Volume

//UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, interface{}) error
// UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, runtime.Object) error

//AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(interface{}) map[string]string
// AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(runtime.Object) map[string]string

//PodAnnotationsFunc is a generic func to return annotations
type PodAnnotationsFunc func(interface{}) map[string]string
// PodAnnotationsFunc is a generic func to return annotations
type PodAnnotationsFunc func(runtime.Object) map[string]string

//RollingUpgradeFuncs contains generic functions to perform rolling upgrade
// RollingUpgradeFuncs contains generic functions to perform rolling upgrade
type RollingUpgradeFuncs struct {
	ItemsFunc       ItemsFunc
	AnnotationsFunc AnnotationsFunc
@@ -46,209 +48,260 @@ type RollingUpgradeFuncs struct {
}

// GetDeploymentItems returns the deployments in given namespace
func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
	deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(meta_v1.ListOptions{})
func GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object {
	deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list deployments %v", err)
	}
	return util.InterfaceSlice(deployments.Items)

	items := make([]runtime.Object, len(deployments.Items))
	// Ensure we always have pod annotations to add to
	for i, v := range deployments.Items {
		if v.Spec.Template.ObjectMeta.Annotations == nil {
			annotations := make(map[string]string)
			deployments.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
		}
		items[i] = &deployments.Items[i]
	}

	return items
}

// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
	daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(meta_v1.ListOptions{})
func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
	daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list daemonSets %v", err)
	}
	return util.InterfaceSlice(daemonSets.Items)

	items := make([]runtime.Object, len(daemonSets.Items))
	// Ensure we always have pod annotations to add to
	for i, v := range daemonSets.Items {
		if v.Spec.Template.ObjectMeta.Annotations == nil {
			daemonSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
		}
		items[i] = &daemonSets.Items[i]
	}

	return items
}

// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
	statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(meta_v1.ListOptions{})
func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Object {
	statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list statefulSets %v", err)
	}
	return util.InterfaceSlice(statefulSets.Items)

	items := make([]runtime.Object, len(statefulSets.Items))
	// Ensure we always have pod annotations to add to
	for i, v := range statefulSets.Items {
		if v.Spec.Template.ObjectMeta.Annotations == nil {
			statefulSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
		}
		items[i] = &statefulSets.Items[i]
	}

	return items
}

// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
	deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(meta_v1.ListOptions{})
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []runtime.Object {
	deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list deploymentConfigs %v", err)
	}
	return util.InterfaceSlice(deploymentConfigs.Items)

	items := make([]runtime.Object, len(deploymentConfigs.Items))
	// Ensure we always have pod annotations to add to
	for i, v := range deploymentConfigs.Items {
		if v.Spec.Template.ObjectMeta.Annotations == nil {
			deploymentConfigs.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
		}
		items[i] = &deploymentConfigs.Items[i]
	}

	return items
}

// GetRolloutItems returns the rollouts in given namespace
func GetRolloutItems(clients kube.Clients, namespace string) []interface{} {
	rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(meta_v1.ListOptions{})
func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
	rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list Rollouts %v", err)
	}
	return util.InterfaceSlice(rollouts.Items)

	items := make([]runtime.Object, len(rollouts.Items))
	// Ensure we always have pod annotations to add to
	for i, v := range rollouts.Items {
		if v.Spec.Template.ObjectMeta.Annotations == nil {
			rollouts.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
		}
		items[i] = &rollouts.Items[i]
	}

	return items
}

// GetDeploymentAnnotations returns the annotations of given deployment
func GetDeploymentAnnotations(item interface{}) map[string]string {
	return item.(appsv1.Deployment).ObjectMeta.Annotations
func GetDeploymentAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.Deployment).ObjectMeta.Annotations
}

// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item interface{}) map[string]string {
	return item.(appsv1.DaemonSet).ObjectMeta.Annotations
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
}

// GetStatefulSetAnnotations returns the annotations of given statefulSet
func GetStatefulSetAnnotations(item interface{}) map[string]string {
	return item.(appsv1.StatefulSet).ObjectMeta.Annotations
func GetStatefulSetAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.StatefulSet).ObjectMeta.Annotations
}

// GetDeploymentConfigAnnotations returns the annotations of given deploymentConfig
func GetDeploymentConfigAnnotations(item interface{}) map[string]string {
	return item.(openshiftv1.DeploymentConfig).ObjectMeta.Annotations
func GetDeploymentConfigAnnotations(item runtime.Object) map[string]string {
	return item.(*openshiftv1.DeploymentConfig).ObjectMeta.Annotations
}

// GetRolloutAnnotations returns the annotations of given rollout
func GetRolloutAnnotations(item interface{}) map[string]string {
	return item.(argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
func GetRolloutAnnotations(item runtime.Object) map[string]string {
	return item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
}

// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
func GetDeploymentPodAnnotations(item interface{}) map[string]string {
	return item.(appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
func GetDeploymentPodAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
}

// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item interface{}) map[string]string {
	return item.(appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
}

// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
func GetStatefulSetPodAnnotations(item interface{}) map[string]string {
	return item.(appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
}

// GetDeploymentConfigPodAnnotations returns the pod's annotations of given deploymentConfig
func GetDeploymentConfigPodAnnotations(item interface{}) map[string]string {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
func GetDeploymentConfigPodAnnotations(item runtime.Object) map[string]string {
	return item.(*openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
}

// GetRolloutPodAnnotations returns the pod's annotations of given rollout
func GetRolloutPodAnnotations(item interface{}) map[string]string {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
func GetRolloutPodAnnotations(item runtime.Object) map[string]string {
	return item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
}

// GetDeploymentContainers returns the containers of given deployment
func GetDeploymentContainers(item interface{}) []v1.Container {
	return item.(appsv1.Deployment).Spec.Template.Spec.Containers
func GetDeploymentContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.Deployment).Spec.Template.Spec.Containers
}

// GetDaemonSetContainers returns the containers of given daemonSet
func GetDaemonSetContainers(item interface{}) []v1.Container {
	return item.(appsv1.DaemonSet).Spec.Template.Spec.Containers
func GetDaemonSetContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers
}

// GetStatefulSetContainers returns the containers of given statefulSet
func GetStatefulSetContainers(item interface{}) []v1.Container {
	return item.(appsv1.StatefulSet).Spec.Template.Spec.Containers
func GetStatefulSetContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.StatefulSet).Spec.Template.Spec.Containers
}

// GetDeploymentConfigContainers returns the containers of given deploymentConfig
func GetDeploymentConfigContainers(item interface{}) []v1.Container {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
func GetDeploymentConfigContainers(item runtime.Object) []v1.Container {
	return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
}

// GetRolloutContainers returns the containers of given rollout
func GetRolloutContainers(item interface{}) []v1.Container {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
func GetRolloutContainers(item runtime.Object) []v1.Container {
	return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
}

// GetDeploymentInitContainers returns the containers of given deployment
func GetDeploymentInitContainers(item interface{}) []v1.Container {
	return item.(appsv1.Deployment).Spec.Template.Spec.InitContainers
func GetDeploymentInitContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.Deployment).Spec.Template.Spec.InitContainers
}

// GetDaemonSetInitContainers returns the containers of given daemonSet
func GetDaemonSetInitContainers(item interface{}) []v1.Container {
	return item.(appsv1.DaemonSet).Spec.Template.Spec.InitContainers
func GetDaemonSetInitContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers
}

// GetStatefulSetInitContainers returns the containers of given statefulSet
func GetStatefulSetInitContainers(item interface{}) []v1.Container {
	return item.(appsv1.StatefulSet).Spec.Template.Spec.InitContainers
func GetStatefulSetInitContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.StatefulSet).Spec.Template.Spec.InitContainers
}

// GetDeploymentConfigInitContainers returns the containers of given deploymentConfig
func GetDeploymentConfigInitContainers(item interface{}) []v1.Container {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
func GetDeploymentConfigInitContainers(item runtime.Object) []v1.Container {
	return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
}

// GetRolloutInitContainers returns the containers of given rollout
func GetRolloutInitContainers(item interface{}) []v1.Container {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
func GetRolloutInitContainers(item runtime.Object) []v1.Container {
	return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
}

// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
	deployment := resource.(appsv1.Deployment)
	_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(&deployment)
func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.Object) error {
	deployment := resource.(*appsv1.Deployment)
	_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
	daemonSet := resource.(appsv1.DaemonSet)
	_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(&daemonSet)
func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
	daemonSet := resource.(*appsv1.DaemonSet)
	_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource interface{}) error {
	statefulSet := resource.(appsv1.StatefulSet)
	_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(&statefulSet)
func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.Object) error {
	statefulSet := resource.(*appsv1.StatefulSet)
	_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
	deploymentConfig := resource.(openshiftv1.DeploymentConfig)
	_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(&deploymentConfig)
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource runtime.Object) error {
	deploymentConfig := resource.(*openshiftv1.DeploymentConfig)
	_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource interface{}) error {
	rollout := resource.(argorolloutv1alpha1.Rollout)
	rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(rollout.Name, meta_v1.GetOptions{})
func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error {
	rollout := resource.(*argorolloutv1alpha1.Rollout)
	rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
	logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
	logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
	_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(&rollout)
	_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// GetDeploymentVolumes returns the Volumes of given deployment
func GetDeploymentVolumes(item interface{}) []v1.Volume {
	return item.(appsv1.Deployment).Spec.Template.Spec.Volumes
func GetDeploymentVolumes(item runtime.Object) []v1.Volume {
	return item.(*appsv1.Deployment).Spec.Template.Spec.Volumes
}

// GetDaemonSetVolumes returns the Volumes of given daemonSet
func GetDaemonSetVolumes(item interface{}) []v1.Volume {
	return item.(appsv1.DaemonSet).Spec.Template.Spec.Volumes
func GetDaemonSetVolumes(item runtime.Object) []v1.Volume {
	return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes
}

// GetStatefulSetVolumes returns the Volumes of given statefulSet
func GetStatefulSetVolumes(item interface{}) []v1.Volume {
	return item.(appsv1.StatefulSet).Spec.Template.Spec.Volumes
func GetStatefulSetVolumes(item runtime.Object) []v1.Volume {
	return item.(*appsv1.StatefulSet).Spec.Template.Spec.Volumes
}

// GetDeploymentConfigVolumes returns the Volumes of given deploymentConfig
func GetDeploymentConfigVolumes(item interface{}) []v1.Volume {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
func GetDeploymentConfigVolumes(item runtime.Object) []v1.Volume {
	return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
}

// GetRolloutVolumes returns the Volumes of given rollout
func GetRolloutVolumes(item interface{}) []v1.Volume {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
func GetRolloutVolumes(item runtime.Object) []v1.Volume {
	return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
}
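The recurring pattern in this diff is the move from interface{} with value type assertions to runtime.Object with pointer assertions, and from util.InterfaceSlice copies to &deployments.Items[i] pointers. The pointer is what lets later callbacks mutate the pod template annotations in place before UpdateFunc submits the object. A standalone toy example (not Reloader code) of why the copy-based version loses writes:

	package main

	import "fmt"

	type deployment struct{ restartedAt string }

	func main() {
		items := []deployment{{}}

		// Holding a copy of the slice element: the write is lost.
		copyOf := items[0]
		copyOf.restartedAt = "2022-07-18"
		fmt.Println(items[0].restartedAt) // "" — the slice element is untouched

		// Holding a pointer into the slice, as the new ItemsFunc implementations do:
		ptr := &items[0]
		ptr.restartedAt = "2022-07-18"
		fmt.Println(items[0].restartedAt) // "2022-07-18" — the update func sees the change
	}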
@@ -1,9 +1,14 @@
package cmd

import (
	"context"
	"errors"
	"fmt"
	"os"
	"strings"

	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/leadership"

	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
@@ -18,9 +23,10 @@ import (
// NewReloaderCommand starts the reloader controller
func NewReloaderCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "reloader",
		Short: "A watcher for your Kubernetes cluster",
		Run:   startReloader,
		Use:     "reloader",
		Short:   "A watcher for your Kubernetes cluster",
		PreRunE: validateFlags,
		Run:     startReloader,
	}

	// options
@@ -32,9 +38,39 @@ func NewReloaderCommand() *cobra.Command {
	cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON")
	cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
	cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
	cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
	cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
	cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
	cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")

	return cmd
}

func validateFlags(*cobra.Command, []string) error {
	// Ensure the reload strategy is one of the following...
	var validReloadStrategy bool
	valid := []string{constants.EnvVarsReloadStrategy, constants.AnnotationsReloadStrategy}
	for _, s := range valid {
		if s == options.ReloadStrategy {
			validReloadStrategy = true
		}
	}

	if !validReloadStrategy {
		err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
		return errors.New(err)
	}

	// Validate that HA options are correct
	if options.EnableHA {
		if err := validateHAEnvs(); err != nil {
			return err
		}
	}

	return nil
}

func configureLogging(logFormat string) error {
	switch logFormat {
	case "json":
@@ -48,6 +84,25 @@ func configureLogging(logFormat string) error {
	return nil
}

func validateHAEnvs() error {
	podName, podNamespace := getHAEnvs()

	if podName == "" {
		return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNameEnv, constants.PodNameEnv)
	}
	if podNamespace == "" {
		return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNamespaceEnv, constants.PodNamespaceEnv)
	}
	return nil
}

func getHAEnvs() (string, string) {
	podName := os.Getenv(constants.PodNameEnv)
	podNamespace := os.Getenv(constants.PodNamespaceEnv)

	return podName, podNamespace
}

func startReloader(cmd *cobra.Command, args []string) {
	err := configureLogging(options.LogFormat)
	if err != nil {
@@ -79,6 +134,7 @@ func startReloader(cmd *cobra.Command, args []string) {

	collectors := metrics.SetupPrometheusEndpoint()

	var controllers []*controller.Controller
	for k := range kube.ResourceMap {
		if ignoredResourcesList.Contains(k) {
			continue
@@ -89,6 +145,12 @@ func startReloader(cmd *cobra.Command, args []string) {
			logrus.Fatalf("%s", err)
		}

		controllers = append(controllers, c)

		// If HA is enabled we only run the controller when
		if options.EnableHA {
			continue
		}
		// Now let's start the controller
		stop := make(chan struct{})
		defer close(stop)
@@ -96,8 +158,16 @@ func startReloader(cmd *cobra.Command, args []string) {
		go c.Run(1, stop)
	}

	// Wait forever
	select {}
	// Run leadership election
	if options.EnableHA {
		podName, podNamespace := getHAEnvs()
		lock := leadership.GetNewLock(clientset.CoordinationV1(), constants.LockName, podName, podNamespace)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
	}

	logrus.Fatal(leadership.Healthz())
}

func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
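A sketch of how the new PreRunE hook surfaces a bad flag before startReloader ever runs; this is a hypothetical test in the same cmd package, since validateFlags is unexported:

	package cmd

	import "testing"

	// Hypothetical test: an unknown strategy is rejected by PreRunE before
	// the controllers are constructed.
	func TestRejectsUnknownReloadStrategy(t *testing.T) {
		cmd := NewReloaderCommand()
		cmd.SetArgs([]string{"--reload-strategy", "bogus"})
		if err := cmd.Execute(); err == nil {
			t.Fatal("expected error: reload-strategy must be one of: env-vars, annotations")
		}
	}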
@@ -7,4 +7,23 @@ const (
	SecretEnvVarPostfix = "SECRET"
	// EnvVarPrefix is a Prefix for environment variable
	EnvVarPrefix = "STAKATER_"

	// ReloaderAnnotationPrefix is a Prefix for all reloader annotations
	ReloaderAnnotationPrefix = "reloader.stakater.com"
	// LastReloadedFromAnnotation is an annotation used to describe the last resource that triggered a reload
	LastReloadedFromAnnotation = "last-reloaded-from"

	// ReloadStrategyFlag The reload strategy flag name
	ReloadStrategyFlag = "reload-strategy"
	// EnvVarsReloadStrategy instructs Reloader to add container environment variables to facilitate a restart
	EnvVarsReloadStrategy = "env-vars"
	// AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart
	AnnotationsReloadStrategy = "annotations"
)

// Leadership election related consts
const (
	LockName        string = "stakaer-reloader-lock"
	PodNameEnv      string = "POD_NAME"
	PodNamespaceEnv string = "POD_NAMESPACE"
)
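The two annotation constants above combine into the key written by the new annotations reload strategy (see createReloadedAnnotations in the upgrade.go diff further down); a quick sketch of the resulting key:

	package main

	import "fmt"

	func main() {
		const reloaderAnnotationPrefix = "reloader.stakater.com"
		const lastReloadedFromAnnotation = "last-reloaded-from"

		key := fmt.Sprintf("%s/%s", reloaderAnnotationPrefix, lastReloadedFromAnnotation)
		fmt.Println(key) // reloader.stakater.com/last-reloaded-from
	}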
@@ -7,16 +7,19 @@ import (
	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/handler"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubectl/pkg/scheme"
)

// Controller for checking events
@@ -28,8 +31,12 @@ type Controller struct {
	namespace         string
	ignoredNamespaces util.List
	collectors        metrics.Collectors
	recorder          record.EventRecorder
}

// controllerInitialized flag determines whether controlled is being initialized
var controllerInitialized bool = false

// NewController for initializing a Controller
func NewController(
	client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {
@@ -39,6 +46,11 @@ func NewController(
		namespace:         namespace,
		ignoredNamespaces: ignoredNamespaces,
	}
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(""),
	})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("reloader-%s", resource)})

	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	listWatcher := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, fields.Everything())
@@ -52,13 +64,23 @@ func NewController(
	c.informer = informer
	c.queue = queue
	c.collectors = collectors
	c.recorder = recorder

	logrus.Infof("created controller for: %s", resource)
	return &c, nil
}

// Add function to add a new object to the queue in case of creating a resource
func (c *Controller) Add(obj interface{}) {
	// Not required as reloader should update the resource in the event of any change and not in the event of any resource creation.
	// This causes the issue where reloader reloads the pods when reloader itself gets restarted as it's queue is filled with all the k8s objects as new resources.
	if options.ReloadOnCreate == "true" {
		if !c.resourceInIgnoredNamespace(obj) && controllerInitialized {
			c.queue.Add(handler.ResourceCreatedHandler{
				Resource:   obj,
				Collectors: c.collectors,
				Recorder:   c.recorder,
			})
		}
	}
}

func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
@@ -78,6 +100,7 @@ func (c *Controller) Update(old interface{}, new interface{}) {
			Resource:    new,
			OldResource: old,
			Collectors:  c.collectors,
			Recorder:    c.recorder,
		})
	}
}
@@ -87,7 +110,7 @@ func (c *Controller) Delete(old interface{}) {
	// Todo: Any future delete event can be handled here
}

//Run function for controller which handles the queue
// Run function for controller which handles the queue
func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
	defer runtime.HandleCrash()

@@ -111,6 +134,9 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
}

func (c *Controller) runWorker() {
	// At this point the controller is fully initialized and we can start processing the resources
	controllerInitialized = true

	for c.processNextItem() {
	}
}
@@ -145,7 +171,7 @@ func (c *Controller) handleErr(err error, key interface{}) {

	// This controller retries 5 times if something goes wrong. After that, it stops trying.
	if c.queue.NumRequeues(key) < 5 {
		logrus.Errorf("Error syncing events %v: %v", key, err)
		logrus.Errorf("Error syncing events: %v", err)

		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
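The reload-on-create gate added to Add above is easy to misread: both the flag and the initialized flag must be true, otherwise Reloader would reload workloads for every pre-existing object replayed into its queue when it starts. A toy model (not Reloader code) of the condition:

	package main

	import "fmt"

	// shouldEnqueueCreate mirrors the gate in Controller.Add: reload-on-create
	// must be enabled AND the controller must have finished its initial sync.
	func shouldEnqueueCreate(reloadOnCreate string, controllerInitialized bool) bool {
		return reloadOnCreate == "true" && controllerInitialized
	}

	func main() {
		fmt.Println(shouldEnqueueCreate("true", false)) // false: initial sync still replaying objects
		fmt.Println(shouldEnqueueCreate("true", true))  // true: a genuinely new resource
	}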
File diff suppressed because it is too large
@@ -5,12 +5,14 @@ import (
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/util"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

// ResourceCreatedHandler contains new objects
type ResourceCreatedHandler struct {
	Resource   interface{}
	Collectors metrics.Collectors
	Recorder   record.EventRecorder
}

// Handle processes the newly created resource
@@ -20,7 +22,7 @@ func (r ResourceCreatedHandler) Handle() error {
	} else {
		config, _ := r.GetConfig()
		// process resource based on its type
		doRollingUpgrade(config, r.Collectors)
		return doRollingUpgrade(config, r.Collectors, r.Recorder)
	}
	return nil
}
@@ -5,6 +5,7 @@ import (
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/util"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

// ResourceUpdatedHandler contains updated objects
@@ -12,6 +13,7 @@ type ResourceUpdatedHandler struct {
	Resource    interface{}
	OldResource interface{}
	Collectors  metrics.Collectors
	Recorder    record.EventRecorder
}

// Handle processes the updated resource
@@ -22,7 +24,7 @@ func (r ResourceUpdatedHandler) Handle() error {
		config, oldSHAData := r.GetConfig()
		if config.SHAValue != oldSHAData {
			// process resource based on its type
			doRollingUpgrade(config, r.Collectors)
			return doRollingUpgrade(config, r.Collectors, r.Recorder)
		}
	}
	return nil
@@ -1,11 +1,17 @@
package handler

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"regexp"
	"strconv"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
	alert "github.com/stakater/Reloader/internal/pkg/alerts"
	"github.com/stakater/Reloader/internal/pkg/callbacks"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/metrics"
@@ -13,6 +19,9 @@ import (
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/record"
)

// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
@@ -85,32 +94,52 @@ func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
	}
}

func doRollingUpgrade(config util.Config, collectors metrics.Collectors) {
func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder) error {
	clients := kube.GetClients()

	rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
	rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
	rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)

	if kube.IsOpenshift {
		rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
	err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder)
	if err != nil {
		return err
	}
	err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder)
	if err != nil {
		return err
	}
	err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors, recorder)
	if err != nil {
		return err
	}

	rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
	if kube.IsOpenshift {
		err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors, recorder)
		if err != nil {
			return err
		}
	}

	if options.IsArgoRollouts == "true" {
		err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder)
		if err != nil {
			return err
		}
	}

	return nil
}

func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) {
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder) error {

	err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
	err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors, recorder)
	if err != nil {
		logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
	}
	return err
}

// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder) error {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	var err error

	for _, i := range items {
		// find correct annotation and update the resource
		annotations := upgradeFuncs.AnnotationsFunc(i)
@@ -126,15 +155,16 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
		result := constants.NotUpdated
		reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
		if err == nil && reloaderEnabled {
			result = updateContainers(upgradeFuncs, i, config, true)
			result = invokeReloadStrategy(upgradeFuncs, i, config, true)
		}

		if result != constants.Updated && annotationValue != "" {
			values := strings.Split(annotationValue, ",")
			for _, value := range values {
				value = strings.Trim(value, " ")
				if value == config.ResourceName {
					result = updateContainers(upgradeFuncs, i, config, false)
				value = strings.TrimSpace(value)
				re := regexp.MustCompile("^" + value + "$")
				if re.Match([]byte(config.ResourceName)) {
					result = invokeReloadStrategy(upgradeFuncs, i, config, false)
					if result == constants.Updated {
						break
					}
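Note the behavioral change in the hunk above: annotation values are no longer compared for string equality but compiled into anchored regular expressions, so a value such as my-config-.* now matches a whole family of resources while plain names keep their old equality semantics. A standalone sketch of the new matching rule:

	package main

	import (
		"fmt"
		"regexp"
		"strings"
	)

	func matches(annotationValue, resourceName string) bool {
		for _, value := range strings.Split(annotationValue, ",") {
			value = strings.TrimSpace(value)
			// Anchored exactly as in PerformRollingUpgrade: plain names still
			// behave like equality checks, but regex metacharacters now work.
			re := regexp.MustCompile("^" + value + "$")
			if re.MatchString(resourceName) {
				return true
			}
		}
		return false
	}

	func main() {
		fmt.Println(matches("my-config", "my-config"))       // true
		fmt.Println(matches("my-config-.*", "my-config-v2")) // true
		fmt.Println(matches("my-config", "my-config-v2"))    // false
	}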
@@ -145,24 +175,44 @@
		if result != constants.Updated && searchAnnotationValue == "true" {
			matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
			if matchAnnotationValue == "true" {
				result = updateContainers(upgradeFuncs, i, config, true)
				result = invokeReloadStrategy(upgradeFuncs, i, config, true)
			}
		}

		if result == constants.Updated {
			err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
			resourceName := util.ToObjectMeta(i).Name
			accessor, err := meta.Accessor(i)
			if err != nil {
				logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
				return err
			}
			resourceName := accessor.GetName()
			err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
			if err != nil {
				message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
				logrus.Errorf(message)
				collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
				if recorder != nil {
					recorder.Event(i, v1.EventTypeWarning, "ReloadFail", message)
				}
				return err
			} else {
				logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
				logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
				message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
				message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
				logrus.Infof(message)
				collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
				alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
				if recorder != nil {
					recorder.Event(i, v1.EventTypeNormal, "Reloaded", message)
				}
				if ok && alert_on_reload == "true" {
					msg := fmt.Sprintf(
						"Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
						config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
					alert.SendWebhookAlert(msg)
				}
			}
		}
	}
	return err
	return nil
}

func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
@@ -236,7 +286,7 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
	return nil
}

func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) *v1.Container {
func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) *v1.Container {
	volumes := upgradeFuncs.VolumesFunc(item)
	containers := upgradeFuncs.ContainersFunc(item)
	initContainers := upgradeFuncs.InitContainersFunc(item)
@@ -275,10 +325,70 @@ func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter
	return container
}

func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
		return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
	}

	return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
}

func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
	if container == nil {
		return constants.NoContainerFound
	}

	// Generate reloaded annotations. Attaching this to the item's annotation will trigger a rollout
	// Note: the data on this struct is purely informational and is not used for future updates
	reloadSource := util.NewReloadSourceFromConfig(config, []string{container.Name})
	annotations, err := createReloadedAnnotations(&reloadSource)
	if err != nil {
		logrus.Errorf("Failed to create reloaded annotations for %s! error = %v", config.ResourceName, err)
		return constants.NotUpdated
	}

	// Copy the all annotations to the item's annotations
	pa := upgradeFuncs.PodAnnotationsFunc(item)
	if pa == nil {
		return constants.NotUpdated
	}

	for k, v := range annotations {
		pa[k] = v
	}

	return constants.Updated
}

func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, error) {
	if target == nil {
		return nil, errors.New("target is required")
	}

	// Create a single "last-invokeReloadStrategy-from" annotation that stores metadata about the
	// resource that caused the last invokeReloadStrategy.
	// Intentionally only storing the last item in order to keep
	// the generated annotations as small as possible.
	annotations := make(map[string]string)
	lastReloadedResourceName := fmt.Sprintf("%s/%s",
		constants.ReloaderAnnotationPrefix,
		constants.LastReloadedFromAnnotation,
	)

	lastReloadedResource, err := json.Marshal(target)
	if err != nil {
		return nil, err
	}

	annotations[lastReloadedResourceName] = string(lastReloadedResource)
	return annotations, nil
}

func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
	var result constants.Result
	envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
	container := getContainerToUpdate(upgradeFuncs, item, config, autoReload)
	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)

	if container == nil {
		return constants.NoContainerFound
||||
File diff suppressed because it is too large
110 internal/pkg/leadership/leadership.go Normal file
@@ -0,0 +1,110 @@
package leadership

import (
	"context"
	"net/http"
	"sync"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/controller"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"

	coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
)

const healthPort string = ":9091"

var (
	// Used for the liveness probe
	m       sync.Mutex
	healthy bool = true
)

// GetNewLock returns a LeaseLock that can be used for leader election.
func GetNewLock(client coordinationv1.CoordinationV1Interface, lockName, podname, namespace string) *resourcelock.LeaseLock {
	return &resourcelock.LeaseLock{
		LeaseMeta: v1.ObjectMeta{
			Name:      lockName,
			Namespace: namespace,
		},
		Client: client,
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: podname,
		},
	}
}

// RunLeaderElection runs leadership election. If an instance of the controller
// is the leader and stops leading, it will shut down.
func RunLeaderElection(lock *resourcelock.LeaseLock, ctx context.Context, cancel context.CancelFunc, id string, controllers []*controller.Controller) {
	// Construct stop channels for the controllers to use
	var stopChannels []chan struct{}
	for i := 0; i < len(controllers); i++ {
		stop := make(chan struct{})
		stopChannels = append(stopChannels, stop)
	}

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		ReleaseOnCancel: true,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(c context.Context) {
				logrus.Info("became leader, starting controllers")
				runControllers(controllers, stopChannels)
			},
			OnStoppedLeading: func() {
				logrus.Info("no longer leader, shutting down")
				stopControllers(stopChannels)
				cancel()
				m.Lock()
				defer m.Unlock()
				healthy = false
			},
			OnNewLeader: func(current_id string) {
				if current_id == id {
					logrus.Info("still the leader!")
					return
				}
				logrus.Infof("new leader is %s", current_id)
			},
		},
	})
}

func runControllers(controllers []*controller.Controller, stopChannels []chan struct{}) {
	for i, c := range controllers {
		c := c // capture the loop variable for the goroutine
		go c.Run(1, stopChannels[i])
	}
}

func stopControllers(stopChannels []chan struct{}) {
	for _, c := range stopChannels {
		close(c)
	}
}

// Healthz serves the liveness probe endpoint. If leadership election is
// enabled and a replica stops leading, the liveness probe will fail and the
// kubelet will restart the container.
func Healthz() error {
	http.HandleFunc("/live", healthz)
	return http.ListenAndServe(healthPort, nil)
}

func healthz(w http.ResponseWriter, req *http.Request) {
	m.Lock()
	defer m.Unlock()
	if healthy {
		if i, err := w.Write([]byte("alive")); err != nil {
			logrus.Infof("failed to write liveness response, wrote: %d bytes, got err: %s", i, err)
		}
		return
	}

	w.WriteHeader(http.StatusInternalServerError)
}
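For orientation, the pieces above might be wired together roughly as follows. This is a sketch, not the project's actual entrypoint: the lock name, the Downward API env var names, and the in-cluster client setup are all assumptions.

package main

import (
	"context"
	"os"

	"github.com/stakater/Reloader/internal/pkg/controller"
	"github.com/stakater/Reloader/internal/pkg/leadership"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumed in-cluster setup; the real entrypoint may differ.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	// Identity and namespace are typically injected via the Downward API.
	podName := os.Getenv("POD_NAME")
	namespace := os.Getenv("POD_NAMESPACE")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// "reloader-lock" is an assumed lock name; the project uses constants.LockName.
	lock := leadership.GetNewLock(clientset.CoordinationV1(), "reloader-lock", podName, namespace)

	var controllers []*controller.Controller // built elsewhere
	go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)

	// Serve the /live endpoint on :9091 so the kubelet can restart
	// replicas that lose leadership.
	if err := leadership.Healthz(); err != nil {
		panic(err)
	}
}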
213 internal/pkg/leadership/leadership_test.go Normal file
@@ -0,0 +1,213 @@
package leadership

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	"testing"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/controller"
	"github.com/stakater/Reloader/internal/pkg/handler"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/testutil"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
)

func TestMain(m *testing.M) {
	testutil.CreateNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)

	logrus.Infof("Running Testcases")
	retCode := m.Run()

	testutil.DeleteNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)

	os.Exit(retCode)
}

func TestHealthz(t *testing.T) {
	request, err := http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response := httptest.NewRecorder()

	healthz(response, request)
	got := response.Code
	want := 200

	if got != want {
		t.Fatalf("got: %d, want: %d", got, want)
	}

	// Have the liveness probe serve a 500
	healthy = false

	request, err = http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response = httptest.NewRecorder()

	healthz(response, request)
	got = response.Code
	want = 500

	if got != want {
		t.Fatalf("got: %d, want: %d", got, want)
	}
}

// TestRunLeaderElection validates that the liveness endpoint serves 500 when
// leadership election fails
func TestRunLeaderElection(t *testing.T) {
	ctx, cancel := context.WithCancel(context.TODO())

	lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), constants.LockName, testutil.Pod, testutil.Namespace)

	go RunLeaderElection(lock, ctx, cancel, testutil.Pod, []*controller.Controller{})

	// healthy was set to false by TestHealthz and is never reset, so the
	// liveness probe is already serving 500 at this point
	request, err := http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response := httptest.NewRecorder()

	healthz(response, request)
	got := response.Code
	want := 500

	if got != want {
		t.Fatalf("got: %d, want: %d", got, want)
	}

	// Cancel the leader election context, so leadership is released and
	// the live endpoint serves 500
	cancel()

	request, err = http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response = httptest.NewRecorder()

	healthz(response, request)
	got = response.Code
	want = 500

	if got != want {
		t.Fatalf("got: %d, want: %d", got, want)
	}
}

// TestRunLeaderElectionWithControllers tests that leadership election works
// with real controllers and that on context cancellation the controllers stop
// running.
func TestRunLeaderElectionWithControllers(t *testing.T) {
	t.Logf("Creating controller")
	var controllers []*controller.Controller
	for k := range kube.ResourceMap {
		c, err := controller.NewController(testutil.Clients.KubernetesClient, k, testutil.Namespace, []string{}, metrics.NewCollectors())
		if err != nil {
			logrus.Fatalf("%s", err)
		}

		controllers = append(controllers, c)
	}
	time.Sleep(3 * time.Second)

	lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), fmt.Sprintf("%s-%d", constants.LockName, 1), testutil.Pod, testutil.Namespace)

	ctx, cancel := context.WithCancel(context.TODO())

	// Start running leadership election, this also starts the controllers
	go RunLeaderElection(lock, ctx, cancel, testutil.Pod, controllers)
	time.Sleep(3 * time.Second)

	// Create a configmap and a deployment for the controllers to act on
	configmapName := testutil.ConfigmapNamePrefix + "-update-" + testutil.RandSeq(5)
	configmapClient, err := testutil.CreateConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName, "www.google.com")
	if err != nil {
		t.Fatalf("Error while creating the configmap %v", err)
	}

	// Creating deployment
	_, err = testutil.CreateDeployment(testutil.Clients.KubernetesClient, configmapName, testutil.Namespace, true)
	if err != nil {
		t.Fatalf("Error in deployment creation: %v", err)
	}

	// Updating configmap for the first time
	updateErr := testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com")
	if updateErr != nil {
		t.Fatalf("Configmap was not updated")
	}
	time.Sleep(3 * time.Second)

	// Verifying deployment update
	logrus.Infof("Verifying that the pod env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
	config := util.Config{
		Namespace:    testutil.Namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
		Annotation:   options.ConfigmapUpdateOnChangeAnnotation,
	}
	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
	updated := testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Fatalf("Deployment was not updated")
	}
	time.Sleep(testutil.SleepDuration)

	// Cancel the leader election context, so leadership is released
	logrus.Info("shutting down controller from test")
	cancel()
	time.Sleep(5 * time.Second)

	// Updating configmap again
	updateErr = testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com/new")
	if updateErr != nil {
		t.Fatalf("Configmap was not updated")
	}

	// Verifying that the deployment was not updated as leadership has been lost
	logrus.Infof("Verifying that the pod env var has not been updated")
	shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
	config = util.Config{
		Namespace:    testutil.Namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
		Annotation:   options.ConfigmapUpdateOnChangeAnnotation,
	}
	deploymentFuncs = handler.GetDeploymentRollingUpgradeFuncs()
	updated = testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if updated {
		t.Fatalf("Deployment was updated")
	}

	// Deleting deployment
	err = testutil.DeleteDeployment(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
	if err != nil {
		logrus.Errorf("Error while deleting the deployment %v", err)
	}

	// Deleting configmap
	err = testutil.DeleteConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(testutil.SleepDuration)
}
@@ -1,5 +1,7 @@
package options

+import "github.com/stakater/Reloader/internal/pkg/constants"

var (
	// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in
	// configmaps specified by name
@@ -17,4 +19,12 @@ var (
	SearchMatchAnnotation = "reloader.stakater.com/match"
	// LogFormat is the log format to use (json, or empty string for default)
	LogFormat = ""
+	// IsArgoRollouts adds support for Argo Rollouts
+	IsArgoRollouts = "false"
+	// ReloadStrategy specifies the update strategy
+	ReloadStrategy = constants.EnvVarsReloadStrategy
+	// ReloadOnCreate adds support for watching create events
+	ReloadOnCreate = "false"
+	// EnableHA adds support for running multiple replicas via leadership election
+	EnableHA = false
)
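A rough sketch of how the new EnableHA option might gate startup, with leadership election only when HA is enabled. The helper name and lock name are hypothetical; the real wiring lives elsewhere in the project:

package main

import (
	"context"

	"github.com/stakater/Reloader/internal/pkg/controller"
	"github.com/stakater/Reloader/internal/pkg/leadership"
	"github.com/stakater/Reloader/internal/pkg/options"
	coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
)

// startControllers is a hypothetical helper showing the branch: with HA
// enabled, controllers only run on the elected leader; otherwise they
// start immediately.
func startControllers(client coordinationv1.CoordinationV1Interface, podName, namespace string, controllers []*controller.Controller) {
	if options.EnableHA {
		ctx, cancel := context.WithCancel(context.Background())
		lock := leadership.GetNewLock(client, "reloader-lock", podName, namespace) // lock name assumed
		// Blocks until the context is cancelled; controllers are started
		// only once this replica becomes the leader.
		leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
		return
	}
	for _, c := range controllers {
		c := c
		go c.Run(1, make(chan struct{}))
	}
}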
@@ -1,6 +1,9 @@
package testutil

import (
	"context"
	"encoding/json"
	"fmt"
	"math/rand"
	"sort"
	"strconv"
@@ -13,11 +16,13 @@ import (
	"github.com/stakater/Reloader/internal/pkg/callbacks"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/crypto"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	core_v1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -31,9 +36,22 @@ var (
	SecretResourceType = "secrets"
)

var (
	Clients             = kube.GetClients()
	Pod                 = "test-reloader-" + RandSeq(5)
	Namespace           = "test-reloader-" + RandSeq(5)
	ConfigmapNamePrefix = "testconfigmap-reloader"
	SecretNamePrefix    = "testsecret-reloader"
	Data                = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
	NewData             = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
	UpdatedData         = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
	Collectors          = metrics.NewCollectors()
	SleepDuration       = 3 * time.Second
)

// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
-	_, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
+	_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
	if err != nil {
		logrus.Fatalf("Failed to create namespace for testing %v", err)
	} else {
@@ -43,7 +61,7 @@ func CreateNamespace(namespace string, client kubernetes.Interface) {

// DeleteNamespace deletes namespace for testing
func DeleteNamespace(namespace string, client kubernetes.Interface) {
-	err := client.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{})
+	err := client.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
	if err != nil {
		logrus.Fatalf("Failed to delete namespace that was created for testing %v", err)
	} else {
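Most of the remaining testutil hunks repeat one mechanical change: around client-go v0.18 the typed clients gained a leading context parameter and started taking options by value instead of by pointer. A minimal sketch of the before/after shape for one call site:

package main

import (
	"context"

	"github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteConfigMap shows the post-v0.18 call shape: context first,
// options passed by value instead of by pointer.
func deleteConfigMap(client kubernetes.Interface, ns, name string) error {
	// Pre-v0.18 equivalent (no longer compiles):
	//   client.CoreV1().ConfigMaps(ns).Delete(name, &metav1.DeleteOptions{})
	err := client.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil {
		logrus.Errorf("failed to delete configmap %s/%s: %v", ns, name, err)
	}
	return err
}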
@@ -562,8 +580,8 @@ func GetSecretWithUpdatedLabel(namespace string, secretName string, label string
	}
}

-// GetResourceSHA returns the SHA value of given environment variable
-func GetResourceSHA(containers []v1.Container, envVar string) string {
+// GetResourceSHAFromEnvVar returns the SHA value of the given environment variable
+func GetResourceSHAFromEnvVar(containers []v1.Container, envVar string) string {
	for i := range containers {
		envs := containers[i].Env
		for j := range envs {
@@ -575,7 +593,29 @@ func GetResourceSHA(containers []v1.Container, envVar string) string {
	return ""
}

-//ConvertResourceToSHA generates SHA from secret or configmap data
+// GetResourceSHAFromAnnotation returns the SHA value recorded in the
+// last-reloaded-from pod annotation
+func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
+	lastReloadedResourceName := fmt.Sprintf("%s/%s",
+		constants.ReloaderAnnotationPrefix,
+		constants.LastReloadedFromAnnotation,
+	)
+
+	annotationJson, ok := podAnnotations[lastReloadedResourceName]
+	if !ok {
+		return ""
+	}
+
+	var last util.ReloadSource
+	bytes := []byte(annotationJson)
+	err := json.Unmarshal(bytes, &last)
+	if err != nil {
+		return ""
+	}
+
+	return last.Hash
+}
+
+// ConvertResourceToSHA generates SHA from secret or configmap data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
	values := []string{}
	if resourceType == SecretResourceType {
@@ -597,7 +637,7 @@ func ConvertResourceToSHA(resourceType string, namespace string, resourceName st
func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) {
	logrus.Infof("Creating configmap")
	configmapClient := client.CoreV1().ConfigMaps(namespace)
-	_, err := configmapClient.Create(GetConfigmap(namespace, configmapName, data))
+	_, err := configmapClient.Create(context.TODO(), GetConfigmap(namespace, configmapName, data), metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return configmapClient, err
}
@@ -606,7 +646,7 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {
	logrus.Infof("Creating secret")
	secretClient := client.CoreV1().Secrets(namespace)
-	_, err := secretClient.Create(GetSecret(namespace, secretName, data))
+	_, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return secretClient, err
}
@@ -621,7 +661,7 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp
	} else {
		deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
	}
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -636,7 +676,7 @@ func CreateDeploymentConfig(client appsclient.Interface, deploymentName string,
	} else {
		deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName)
	}
-	deploymentConfig, err := deploymentConfigsClient.Create(deploymentConfigObj)
+	deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{})
	time.Sleep(5 * time.Second)
	return deploymentConfig, err
}
@@ -651,7 +691,7 @@ func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentNa
	} else {
		deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName)
	}
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -661,7 +701,7 @@ func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentNam
	logrus.Infof("Creating Deployment")
	deploymentClient := client.AppsV1().Deployments(namespace)
	deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err

@@ -672,7 +712,7 @@ func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentN
	logrus.Infof("Creating Deployment")
	deploymentClient := client.AppsV1().Deployments(namespace)
	deploymentObj := GetDeploymentWithPodAnnotations(namespace, deploymentName, both)
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -684,7 +724,7 @@ func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface,
	deploymentClient := client.AppsV1().Deployments(namespace)
	deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
	deploymentObj.Annotations = annotations
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -699,7 +739,7 @@ func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespac
	} else {
		daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName)
	}
-	daemonset, err := daemonsetClient.Create(daemonsetObj)
+	daemonset, err := daemonsetClient.Create(context.TODO(), daemonsetObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return daemonset, err
}
@@ -714,7 +754,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
	} else {
		statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName)
	}
-	statefulset, err := statefulsetClient.Create(statefulsetObj)
+	statefulset, err := statefulsetClient.Create(context.TODO(), statefulsetObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return statefulset, err
}
@@ -722,7 +762,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
// DeleteDeployment deletes a deployment in given namespace and returns the error if any
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
	logrus.Infof("Deleting Deployment")
-	deploymentError := client.AppsV1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{})
+	deploymentError := client.AppsV1().Deployments(namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return deploymentError
}
@@ -730,7 +770,7 @@ func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentN
// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any
func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error {
	logrus.Infof("Deleting DeploymentConfig")
-	deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(deploymentConfigName, &metav1.DeleteOptions{})
+	deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return deploymentConfigError
}
@@ -738,7 +778,7 @@ func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deplo
// DeleteDaemonSet deletes a daemonset in given namespace and returns the error if any
func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
	logrus.Infof("Deleting DaemonSet %s", daemonsetName)
-	daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(daemonsetName, &metav1.DeleteOptions{})
+	daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), daemonsetName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return daemonsetError
}
@@ -746,7 +786,7 @@ func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetNam
// DeleteStatefulSet deletes a statefulset in given namespace and returns the error if any
func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {
	logrus.Infof("Deleting StatefulSet %s", statefulsetName)
-	statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})
+	statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), statefulsetName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return statefulsetError
}
@@ -760,7 +800,7 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin
	} else {
		configmap = GetConfigmap(namespace, configmapName, data)
	}
-	_, updateErr := configmapClient.Update(configmap)
+	_, updateErr := configmapClient.Update(context.TODO(), configmap, metav1.UpdateOptions{})
	time.Sleep(3 * time.Second)
	return updateErr
}
@@ -774,7 +814,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
	} else {
		secret = GetSecret(namespace, secretName, data)
	}
-	_, updateErr := secretClient.Update(secret)
+	_, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{})
	time.Sleep(3 * time.Second)
	return updateErr
}
@@ -782,7 +822,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
// DeleteConfigMap deletes a configmap in given namespace and returns the error if any
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
	logrus.Infof("Deleting configmap %q.\n", configmapName)
-	err := client.CoreV1().ConfigMaps(namespace).Delete(configmapName, &metav1.DeleteOptions{})
+	err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configmapName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return err
}
@@ -790,7 +830,7 @@ func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapNam
// DeleteSecret deletes a secret in given namespace and returns the error if any
func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
	logrus.Infof("Deleting secret %q.\n", secretName)
-	err := client.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
+	err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return err
}
@@ -805,15 +845,20 @@
	return string(b)
}

-// VerifyResourceUpdate verifies whether the rolling upgrade happened or not
-func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not
+func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		containers := upgradeFuncs.ContainersFunc(i)
+		accessor, err := meta.Accessor(i)
+		if err != nil {
+			return false
+		}
+		annotations := accessor.GetAnnotations()
		// match resources with the correct annotation
-		annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
-		searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
-		reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
+		annotationValue := annotations[config.Annotation]
+		searchAnnotationValue := annotations[options.AutoSearchAnnotation]
+		reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
		reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
		matches := false
		if err == nil && reloaderEnabled {
@@ -835,7 +880,50 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi

		if matches {
			envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix
-			updated := GetResourceSHA(containers, envName)
+			updated := GetResourceSHAFromEnvVar(containers, envName)
			if updated == config.SHAValue {
				return true
			}
		}
	}
	return false
}

// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)
		accessor, err := meta.Accessor(i)
		if err != nil {
			return false
		}
		annotations := accessor.GetAnnotations()
		// match resources with the correct annotation
		annotationValue := annotations[config.Annotation]
		searchAnnotationValue := annotations[options.AutoSearchAnnotation]
		reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
		reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
		matches := false
		if err == nil && reloaderEnabled {
			matches = true
		} else if annotationValue != "" {
			values := strings.Split(annotationValue, ",")
			for _, value := range values {
				value = strings.Trim(value, " ")
				if value == config.ResourceName {
					matches = true
					break
				}
			}
		} else if searchAnnotationValue == "true" {
			if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
				matches = true
			}
		}

		if matches {
			updated := GetResourceSHAFromAnnotation(podAnnotations)
			if updated == config.SHAValue {
				return true
			}
39 internal/pkg/util/reload_source.go Normal file
@@ -0,0 +1,39 @@
package util

import "time"

type ReloadSource struct {
	Type          string   `json:"type"`
	Name          string   `json:"name"`
	Namespace     string   `json:"namespace"`
	Hash          string   `json:"hash"`
	ContainerRefs []string `json:"containerRefs"`
	ObservedAt    int64    `json:"observedAt"`
}

func NewReloadSource(
	resourceName string,
	resourceNamespace string,
	resourceType string,
	resourceHash string,
	containerRefs []string,
) ReloadSource {
	return ReloadSource{
		ObservedAt:    time.Now().Unix(),
		Name:          resourceName,
		Namespace:     resourceNamespace,
		Type:          resourceType,
		Hash:          resourceHash,
		ContainerRefs: containerRefs,
	}
}

func NewReloadSourceFromConfig(config Config, containerRefs []string) ReloadSource {
	return NewReloadSource(
		config.ResourceName,
		config.Namespace,
		config.Type,
		config.SHAValue,
		containerRefs,
	)
}
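For illustration, marshalling a ReloadSource produces the JSON payload that GetResourceSHAFromAnnotation reads back from the pod annotations. The annotation key shown in the comment is an assumption pieced together from constants.ReloaderAnnotationPrefix and constants.LastReloadedFromAnnotation; the hash and output values are made up:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/stakater/Reloader/internal/pkg/util"
)

func main() {
	src := util.NewReloadSource(
		"my-config",     // resource name
		"default",       // namespace
		"CONFIGMAP",     // resource type
		"deadbeef",      // SHA of the resource data (illustrative)
		[]string{"app"}, // containers referencing the resource
	)

	b, err := json.Marshal(src)
	if err != nil {
		panic(err)
	}
	// Stored under something like "reloader.stakater.com/last-reloaded-from"
	// (assumed key; see constants.ReloaderAnnotationPrefix and
	// constants.LastReloadedFromAnnotation).
	fmt.Println(string(b))
	// e.g. {"type":"CONFIGMAP","name":"my-config","namespace":"default",
	//       "hash":"deadbeef","containerRefs":["app"],"observedAt":1620000000}
}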
@@ -1,6 +1,7 @@
package kube

import (
+	"context"
	"os"

	"k8s.io/client-go/tools/clientcmd"
@@ -67,7 +68,7 @@ func isOpenshift() bool {
	if err != nil {
		logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
	}
-	_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do().Raw()
+	_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do(context.TODO()).Raw()
	if err == nil {
		logrus.Info("Environment: Openshift")
		return true