Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)

Compare commits: 251 commits
[Commit list: 251 commits, newest 8f8b95bf57 through oldest eaf8e16414. The author, date, and message columns of the commit table did not survive mirroring; only the bare SHA1s remain, so the table is omitted here.]
.github/dependabot.yml (vendored, new file, +6)

@@ -0,0 +1,6 @@
+version: 2
+updates:
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "weekly"
.github/workflows/pull_request.yaml (vendored, 23 changes)

@@ -7,9 +7,9 @@ on:
 
 env:
   DOCKER_FILE_PATH: Dockerfile
-  GOLANG_VERSION: 1.15.2
+  GOLANG_VERSION: 1.20.1
   KUBERNETES_VERSION: "1.18.0"
-  KIND_VERSION: "0.7.0"
+  KIND_VERSION: "0.10.0"
 
 jobs:
   build:
@@ -18,17 +18,17 @@ jobs:
     if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
     steps:
       - name: Check out code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
          ref: ${{github.event.pull_request.head.sha}}

      # Setting up helm binary
      - name: Set up Helm
-        uses: azure/setup-helm@v1
+        uses: azure/setup-helm@v3.4

      - name: Set up Go
        id: go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v3
        with:
          go-version: ${{ env.GOLANG_VERSION }}

@@ -36,16 +36,21 @@ jobs:
        run: |
          make install

-      - name: Lint
-        run: |
-          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
-          golangci-lint run --timeout=10m ./...
+      - name: Run golangci-lint
+        uses: golangci/golangci-lint-action@v3
+        with:
+          version: v1.51.1
+          only-new-issues: false
+          args: --timeout 10m

      - name: Helm Lint
        run: |
          cd deployments/kubernetes/chart/reloader
          helm lint

      - name: Link check
        uses: gaurav-nelson/github-action-markdown-link-check@v1

      - name: Install kubectl
        run: |
          curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
.github/workflows/push.yaml (vendored, 39 changes)

@@ -7,9 +7,9 @@ on:
 
 env:
   DOCKER_FILE_PATH: Dockerfile
-  GOLANG_VERSION: 1.15.2
+  GOLANG_VERSION: 1.20.1
   KUBERNETES_VERSION: "1.18.0"
-  KIND_VERSION: "0.7.0"
+  KIND_VERSION: "0.10.0"
   HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
 
 jobs:
@@ -20,18 +20,18 @@ jobs:
 
     steps:
       - name: Check out code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token
          fetch-depth: 0 # otherwise, you will fail to push refs to dest repo

      # Setting up helm binary
      - name: Set up Helm
-        uses: azure/setup-helm@v1
+        uses: azure/setup-helm@v3.4

      - name: Set up Go
        id: go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v3
        with:
          go-version: ${{ env.GOLANG_VERSION }}

@@ -39,10 +39,12 @@ jobs:
        run: |
          make install

-      - name: Lint
-        run: |
-          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
-          golangci-lint run --timeout=10m ./...
+      - name: Run golangci-lint
+        uses: golangci/golangci-lint-action@v3
+        with:
+          version: v1.51.1
+          only-new-issues: false
+          args: --timeout 10m

      - name: Install kubectl
        run: |
@@ -68,7 +70,7 @@ jobs:
 
       - name: Generate Tag
         id: generate_tag
-        uses: anothrNick/github-tag-action@1.26.0
+        uses: anothrNick/github-tag-action@1.36.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: true
@@ -106,7 +108,7 @@ jobs:
            org.opencontainers.image.source=${{ github.event.repository.clone_url }}
            org.opencontainers.image.created=${{ steps.prep.outputs.created }}
            org.opencontainers.image.revision=${{ github.sha }}

      ##############################
      ## Add steps to generate required artifacts for a release here(helm chart, operator manifest etc.)
      ##############################
@@ -114,7 +116,7 @@ jobs:
       # Generate tag for operator without "v"
       - name: Generate Operator Tag
         id: generate_operator_tag
-        uses: anothrNick/github-tag-action@1.26.0
+        uses: anothrNick/github-tag-action@1.36.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: false
@@ -127,6 +129,11 @@ jobs:
           VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
         run: make bump-chart
 
+      - name: Helm Template
+        run: |
+          helm template reloader deployments/kubernetes/chart/reloader/ > deployments/kubernetes/reloader.yaml
+          helm template reloader deployments/kubernetes/chart/reloader/ --output-dir deployments/kubernetes/manifests && mv deployments/kubernetes/manifests/reloader/templates/* deployments/kubernetes/manifests/ && rm -r deployments/kubernetes/manifests/reloader
+
       # Publish helm chart
       - name: Publish Helm chart
         uses: stefanprodan/helm-gh-pages@master
@@ -141,16 +148,16 @@ jobs:
          linting: on
          commit_username: stakater-user
          commit_email: stakater@gmail.com

      # Commit back changes
      - name: Commit files
        run: |
          git config --local user.email "stakater@gmail.com"
          git config --local user.name "stakater-user"
          git status
          git add .
          git commit -m "[skip-ci] Update artifacts" -a

      - name: Push changes
        uses: ad-m/github-push-action@master
        with:
@@ -158,7 +165,7 @@ jobs:
           branch: ${{ github.ref }}
 
       - name: Push Latest Tag
-        uses: anothrNick/github-tag-action@1.26.0
+        uses: anothrNick/github-tag-action@1.36.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: true
.github/workflows/release.yaml (vendored, 6 changes)

@@ -6,7 +6,7 @@ on:
       - "v*"
 
 env:
-  GOLANG_VERSION: 1.15.2
+  GOLANG_VERSION: 1.20.1
 
 jobs:
   build:
@@ -15,12 +15,12 @@ jobs:
 
     steps:
       - name: Check out code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # See: https://goreleaser.com/ci/actions/

      - name: Set up Go 1.x
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v3
        with:
          go-version: ${{ env.GOLANG_VERSION }}
        id: go
.gitignore (vendored, 3 changes)

@@ -8,4 +8,5 @@ _gopath/
 .DS_Store
 .vscode
 vendor
 dist
+Reloader
Dockerfile (19 changes)

@@ -1,8 +1,13 @@
+ARG BUILDER_IMAGE
+ARG BASE_IMAGE
+
 # Build the manager binary
-FROM --platform=${BUILDPLATFORM} golang:1.15.2 as builder
+FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.20.1} as builder
 
 ARG TARGETOS
 ARG TARGETARCH
+ARG GOPROXY
+ARG GOPRIVATE
 
 WORKDIR /workspace
 
@@ -19,14 +24,20 @@ COPY internal/ internal/
 COPY pkg/ pkg/
 
 # Build
-RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GO111MODULE=on go build -mod=mod -a -o manager main.go
+RUN CGO_ENABLED=0 \
+    GOOS=${TARGETOS} \
+    GOARCH=${TARGETARCH} \
+    GOPROXY=${GOPROXY} \
+    GOPRIVATE=${GOPRIVATE} \
+    GO111MODULE=on \
+    go build -mod=mod -a -o manager main.go
 
 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
-FROM gcr.io/distroless/static:nonroot
+FROM ${BASE_IMAGE:-gcr.io/distroless/static:nonroot}
 WORKDIR /
 COPY --from=builder /workspace/manager .
-USER nonroot:nonroot
+USER 65532:65532
 
 # Port for metrics and probes
 EXPOSE 9090
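The new build arguments make both stages overridable. A minimal sketch of a local build exercising them (the argument values are illustrative, not values mandated by the diff; buildx derives TARGETOS/TARGETARCH from --platform):

```sh
# Hypothetical local build; image names and tag are examples only.
docker buildx build \
  --platform linux/amd64 \
  --build-arg BUILDER_IMAGE=golang:1.20.1 \
  --build-arg BASE_IMAGE=gcr.io/distroless/static:nonroot \
  --build-arg GOPROXY=https://proxy.golang.org \
  -t reloader:dev --load .
```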
Makefile (58 changes)

@@ -1,48 +1,57 @@
 # note: call scripts from /scripts
 
-.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy release release-all manifest push clean-image
+.PHONY: default build build-image test stop push apply deploy release release-all manifest push
 
 OS ?= linux
 ARCH ?= ???
 ALL_ARCH ?= arm64 arm amd64
 
-BUILDER ?= reloader-builder-${ARCH}
+BUILDER_IMAGE ?=
+BASE_IMAGE ?=
 BINARY ?= Reloader
 DOCKER_IMAGE ?= stakater/reloader
-# Default value "dev"
-TAG ?= v0.0.75.0
-REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${TAG}
-REPOSITORY_ARCH = ${DOCKER_IMAGE}:${TAG}-${ARCH}
 
 # Default value "dev"
 VERSION ?= 0.0.1
 
+REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${VERSION}
+REPOSITORY_ARCH = ${DOCKER_IMAGE}:v${VERSION}-${ARCH}
 BUILD=
 
 GOCMD = go
 GOFLAGS ?= $(GOFLAGS:)
 LDFLAGS =
+GOPROXY ?=
+GOPRIVATE ?=
 
 default: build test
 
 install:
 	"$(GOCMD)" mod download
 
 run:
 	go run ./main.go
 
 build:
 	"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"
 
-builder-image:
-	docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
-
-reloader-${ARCH}.tar:
-	docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
-	docker run --platform ${OS}/${ARCH} --rm "${BUILDER}" > reloader-${ARCH}.tar
-
-binary-image: builder-image
-	cat reloader-${ARCH}.tar | docker buildx build --platform ${OS}/${ARCH} -t "${REPOSITORY_ARCH}" --load -f Dockerfile.run -
+build-image:
+	docker buildx build \
+		--platform ${OS}/${ARCH} \
+		--build-arg GOARCH=$(ARCH) \
+		--build-arg BUILDER_IMAGE=$(BUILDER_IMAGE) \
+		--build-arg BASE_IMAGE=${BASE_IMAGE} \
+		--build-arg GOPROXY=${GOPROXY} \
+		--build-arg GOPRIVATE=${GOPRIVATE} \
+		-t "${REPOSITORY_ARCH}" \
+		--load \
+		-f Dockerfile \
+		.
 
 push:
 	docker push ${REPOSITORY_ARCH}
 
-release: binary-image push manifest
+release: build-image push manifest
 
 release-all:
 	-rm -rf ~/.docker/manifests/*
@@ -66,23 +75,6 @@ test:
 stop:
 	@docker stop "${BINARY}"
 
-clean-images: stop
-	-docker rmi "${BUILDER}"
-	-docker rmi "${BINARY}"
-	@for arch in $(ALL_ARCH) ; do \
-		echo Clean image: $$arch ; \
-		make clean-image ARCH=$$arch ; \
-	done
-	-docker rmi "${REPOSITORY_GENERIC}"
-
-clean-image:
-	-docker rmi "${BUILDER}"
-	-docker rmi "${REPOSITORY_ARCH}"
-	-rm -rf ~/.docker/manifests/*
-
 clean:
 	"$(GOCMD)" clean -i
-	-rm -rf reloader-*.tar
 
 apply:
 	kubectl apply -f deployments/manifests/ -n temp-reloader
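A sketch of invoking the consolidated target (variable values are examples; per the Makefile above, the resulting tag is ${DOCKER_IMAGE}:v${VERSION}-${ARCH}):

```sh
# Builds and loads stakater/reloader:v1.0.13-amd64 locally.
make build-image ARCH=amd64 VERSION=1.0.13 \
  BUILDER_IMAGE=golang:1.20.1 \
  BASE_IMAGE=gcr.io/distroless/static:nonroot
```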
README.md (73 changes)

@@ -6,8 +6,6 @@
 [](https://github.com/stakater/reloader/releases/latest)
 [](https://hub.docker.com/r/stakater/reloader/)
 [](https://hub.docker.com/r/stakater/reloader/)
-[](https://microbadger.com/images/stakater/reloader)
-[](https://microbadger.com/images/stakater/reloader)
 [](LICENSE)
 [](http://stakater.com/?utm_source=Reloader&utm_medium=github)
 
@@ -33,7 +31,8 @@ metadata:
   annotations:
     reloader.stakater.com/auto: "true"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```
 
 This will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret`are updated.
@@ -88,7 +87,8 @@ metadata:
   annotations:
     configmap.reloader.stakater.com/reload: "foo-configmap"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```
 
 Use comma separated list to define multiple configmaps.
@@ -99,7 +99,8 @@ metadata:
   annotations:
     configmap.reloader.stakater.com/reload: "foo-configmap,bar-configmap,baz-configmap"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```
 
 ### Secret
@@ -114,7 +115,8 @@ metadata:
   annotations:
     secret.reloader.stakater.com/reload: "foo-secret"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```
 
 Use comma separated list to define multiple secrets.
@@ -125,7 +127,8 @@ metadata:
   annotations:
     secret.reloader.stakater.com/reload: "foo-secret,bar-secret,baz-secret"
 spec:
-  template: metadata:
+  template:
+    metadata:
 ```
 
 ### NOTES
@@ -140,8 +143,22 @@ spec:
 - you may override the configmap annotation with the `--configmap-annotation` flag
 - you may override the secret annotation with the `--secret-annotation` flag
 - you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
+- you may want to watch only a set of namespaces with certain labels by using the `--namespace-selector` flag
 - you may want to prevent watching certain resources with the `--resources-to-ignore` flag
 - you can configure logging in JSON format with the `--log-format=json` option
+- you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)
+
+## Reload Strategies
+
+Reloader supports multiple "reload" strategies for performing rolling upgrades to resources. The following list describes them:
+
+- **env-vars**: When a tracked `configMap`/`secret` is updated, this strategy attaches a Reloader specific environment variable to any containers
+  referencing the changed `configMap` or `secret` on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.).
+  This strategy can be specified with the `--reload-strategy=env-vars` argument. Note: This is the default reload strategy.
+- **annotations**: When a tracked `configMap`/`secret` is updated, this strategy attaches a `reloader.stakater.com/last-reloaded-from` pod template annotation
+  on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy is useful when using resource syncing tools like ArgoCD, since it will not cause these tools
+  to detect configuration drift after a resource is reloaded. Note: Since the attached pod template annotation only tracks the last reload source, this strategy will reload any tracked resource should its
+  `configMap` or `secret` be deleted and recreated.
+  This strategy can be specified with the `--reload-strategy=annotations` argument.
 
 ## Deploying to Kubernetes
 
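As an aside for readers trying the new option, a minimal sketch of selecting the non-default strategy via the in-repo chart (the release name is arbitrary):

```sh
# Useful with ArgoCD-style sync tools, per the README text above.
helm upgrade --install reloader deployments/kubernetes/chart/reloader \
  --set reloader.reloadStrategy=annotations
```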
@@ -166,6 +183,25 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
 
 `Note`: At one time only one of these resource can be ignored, trying to do it will cause error in Reloader. Workaround for ignoring both resources is by scaling down the reloader pods to `0`.
 
+Reloader can be configured to watch only namespaces labeled with (one or more) labels of your choosing by using the `--namespace-selector` parameter, for example:
+```
+--namespace-selector=reloder:enabled,test:true
+```
+
+Only namespaces labeled like the following namespace YAML will be watched:
+```yaml
+kind: Namespace
+apiVersion: v1
+metadata:
+  ...
+  labels:
+    reloder: enabled
+    test: true
+  ...
+```
+If you want to select namespace only by the key of the label use ```*``` as the value.
+For example, for ```--namespace-selector=select-this:*``` all namespaces with label-key "select-this" will be selected regardless of the labels value
+
 ### Vanilla kustomize
 
 You can also apply the vanilla manifests by running the following command
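A sketch of labeling a namespace so it matches the selector from the README example above (the namespace name is a placeholder; the label keys mirror that example, including its spelling):

```sh
kubectl label namespace my-namespace reloder=enabled test=true
# Reloader then watches it when started with:
#   --namespace-selector=reloder:enabled,test:true
```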
@@ -217,10 +253,33 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
 
 `Note`: At one time only one of these resource can be ignored, trying to do it will cause error in helm template compilation.
 
+Reloader can be configured to watch only namespaces labeled with (one or more) labels of your choosing by using the `namespaceSelector` parameter
+
+| Parameter         | Description                                 | Type   |
+| ----------------- | ------------------------------------------- | ------ |
+| namespaceSelector | list of comma separated key:value namespace | string |
+
 You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart
 
+You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in values.yaml file. Service monitor will be removed in future releases of reloader in favour of Pod monitor.
+
+**Note:** Reloading of OpenShift (DeploymentConfig) and/or Argo Rollouts has to be enabled explicitly because it might not be always possible to use it on a cluster with restricted permissions. This can be done by changing the following parameters:
+
+| Parameter        | Description                                                                                                                                | Type    |
+|------------------|--------------------------------------------------------------------------------------------------------------------------------------------|---------|
+| isOpenshift      | Enable OpenShift DeploymentConfigs. Valid value are either `true` or `false`                                                               | boolean |
+| isArgoRollouts   | Enable Argo Rollouts. Valid value are either `true` or `false`                                                                             | boolean |
+| reloadOnCreate   | Enable reload on create events. Valid value are either `true` or `false`                                                                   | boolean |
+| syncAfterRestart | Enable sync after reloader restarts for **Add** events, works only when reloadOnCreate is `true`. Valid value are either `true` or `false` | boolean |
+
+**ReloadOnCreate** reloadOnCreate controls how Reloader handles secrets being added to the cache for the first time. If reloadOnCreate is set to true:
+* Configmaps/secrets being added to the cache will cause Reloader to perform a rolling update of the associated workload.
+* When applications are deployed for the first time, Reloader will perform a rolling update of the associated workload.
+* If you are running Reloader in HA mode all workloads will have a rolling update performed when a new leader is elected.
+
+If ReloadOnCreate is set to false:
+* Updates to configMaps/Secrets that occur while there is no leader will not be picked up by the new leader until a subsequent update of the configmap/secret occurs. In the worst case the window in which there can be no leader is 15s as this is the LeaseDuration.
+
 ## Help
 
 ### Documentation
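A minimal sketch of enabling the HA-related values described above via the in-repo chart (the specific values are examples, not recommendations):

```sh
helm upgrade --install reloader deployments/kubernetes/chart/reloader \
  --set reloader.enableHA=true \
  --set reloader.deployment.replicas=2 \
  --set reloader.reloadOnCreate=true
```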
build/package/Dockerfile.build (deleted, -26; path per the old Makefile targets above)

@@ -1,26 +0,0 @@
-FROM golang:1.15.2-alpine
-LABEL maintainer "Stakater Team"
-
-ARG GOARCH=amd64
-
-RUN apk -v --update \
-    --no-cache \
-    add git build-base
-
-WORKDIR "$GOPATH/src/github.com/stakater/Reloader"
-
-COPY go.mod go.sum ./
-
-RUN go mod download
-
-COPY . .
-
-ENV CGO_ENABLED=0 GOOS=linux GOARCH=$GOARCH
-
-RUN go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
-
-COPY build/package/Dockerfile.run /
-
-# Running this image produces a tarball suitable to be piped into another
-# Docker build command.
-CMD tar -cf - -C / Dockerfile.run Reloader
build/package/Dockerfile.run (deleted, -14; path per the old Makefile targets above)

@@ -1,14 +0,0 @@
-FROM alpine:3.11
-LABEL maintainer "Stakater Team"
-
-RUN apk add --update --no-cache ca-certificates
-
-COPY Reloader /bin/Reloader
-
-# On alpine 'nobody' has uid 65534
-USER 65534
-
-# Port for metrics and probes
-EXPOSE 9090
-
-ENTRYPOINT ["/bin/Reloader"]
deployments/kubernetes/chart/reloader/Chart.yaml

@@ -3,29 +3,19 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: v0.0.92
-appVersion: v0.0.92
+version: v1.0.13
+appVersion: v1.0.13
 keywords:
   - Reloader
   - kubernetes
 home: https://github.com/stakater/Reloader
 sources:
-  - https://github.com/stakater/IngressMonitorController
   - https://github.com/stakater/Reloader
 icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
 maintainers:
   - name: Stakater
     email: hello@stakater.com
   - name: rasheedamir
-    email: rasheed@aurorasolutions.io
-  - name: waseem-h
-    email: waseemhassan@stakater.com
-  - name: faizanahmad055
-    email: faizan.ahmad55@outlook.com
-  - name: kahootali
-    email: ali.kahoot@aurorasolutions.io
-  - name: ahmadiq
-    email: ahmad@aurorasolutions.io
-  - name: ahsan-storm
-    email: ahsanmuhammad1@outlook.com
+    email: rasheed@stakater.com
+  - name: ahmedwaleedmalik
+    email: waleed@stakater.com
deployments/kubernetes/chart/reloader/templates/NOTES.txt

@@ -1,7 +1,7 @@
 - For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap`. Then add this annotation to main metadata of your `Deployment`
-      configmap.reloader.stakater.com/reload: "foo-configmap"
+      {{ .Values.reloader.custom_annotations.configmap | default "configmap.reloader.stakater.com/reload" }}: "foo-configmap"
 
 - For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
-      secret.reloader.stakater.com/reload: "foo-secret"
+      {{ .Values.reloader.custom_annotations.secret | default "secret.reloader.stakater.com/reload" }}: "foo-secret"
 
 - After successful installation, your pods will get rolling updates when a change in data of configmap or secret will happen.
deployments/kubernetes/chart/reloader/templates/_helpers.tpl

@@ -28,6 +28,23 @@ heritage: {{ .Release.Service | quote }}
 app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
 {{- end -}}
 
+{{/*
+Create pod anti affinity labels
+*/}}
+{{- define "reloader-podAntiAffinity" -}}
+podAntiAffinity:
+  preferredDuringSchedulingIgnoredDuringExecution:
+  - weight: 100
+    podAffinityTerm:
+      labelSelector:
+        matchExpressions:
+        - key: app
+          operator: In
+          values:
+          - {{ template "reloader-fullname" . }}
+      topologyKey: "kubernetes.io/hostname"
+{{- end -}}
+
 {{/*
 Create the name of the service account to use
 */}}
@@ -45,4 +62,4 @@ Create the annotations to support helm3
 {{- define "reloader-helm3.annotations" -}}
 meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
 meta.helm.sh/release-name: {{ .Release.Name | quote }}
-{{- end -}}
+{{- end -}}
deployments/kubernetes/chart/reloader/templates/clusterrole.yaml

@@ -17,7 +17,6 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role
-  namespace: {{ .Release.Namespace }}
 rules:
   - apiGroups:
       - ""
@@ -32,7 +31,15 @@ rules:
      - list
      - get
      - watch
-{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
+{{- if .Values.reloader.namespaceSelector }}
+  - apiGroups:
+      - ""
+    resources:
+      - namespaces
+    verbs:
+      - get
+{{- end }}
+{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
   - apiGroups:
       - "apps.openshift.io"
       - ""
@@ -44,7 +51,7 @@ rules:
      - update
      - patch
 {{- end }}
-{{- if or (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
+{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
   - apiGroups:
       - "argoproj.io"
       - ""
@@ -77,4 +84,21 @@ rules:
      - get
      - update
      - patch
+{{- if .Values.reloader.enableHA }}
+  - apiGroups:
+      - "coordination.k8s.io"
+    resources:
+      - leases
+    verbs:
+      - create
+      - get
+      - update
+{{- end}}
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
 {{- end }}
deployments/kubernetes/chart/reloader/templates/clusterrolebinding.yaml

@@ -17,7 +17,6 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role-binding
-  namespace: {{ .Release.Namespace }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -25,5 +24,5 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: {{ template "reloader-serviceAccountName" . }}
-    namespace: {{ .Release.Namespace }}
+    namespace: {{ .Values.namespace | default .Release.Namespace }}
 {{- end }}
deployments/kubernetes/chart/reloader/templates/deployment.yaml

@@ -15,8 +15,13 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 spec:
+{{- if not (.Values.reloader.enableHA) }}
   replicas: 1
+{{- else }}
+  replicas: {{ .Values.reloader.deployment.replicas }}
+{{- end}}
   revisionHistoryLimit: 2
   selector:
     matchLabels:
@@ -40,13 +45,20 @@ spec:
 {{ toYaml .Values.reloader.matchLabels | indent 8 }}
 {{- end }}
     spec:
+      {{- with .Values.reloader.deployment.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- if .Values.reloader.deployment.nodeSelector }}
       nodeSelector:
 {{ toYaml .Values.reloader.deployment.nodeSelector | indent 8 }}
       {{- end }}
-      {{- if .Values.reloader.deployment.affinity }}
+      {{- if or (.Values.reloader.deployment.affinity) (.Values.reloader.enableHA) }}
       affinity:
+      {{- if .Values.reloader.deployment.affinity }}
 {{ toYaml .Values.reloader.deployment.affinity | indent 8 }}
+      {{- end}}
+      {{ include "reloader-podAntiAffinity" . | indent 8 }}
       {{- end }}
       {{- if .Values.reloader.deployment.tolerations }}
       tolerations:
@@ -59,7 +71,7 @@ spec:
         - image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
           imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
           name: {{ template "reloader-fullname" . }}
-        {{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) }}
+        {{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (.Values.reloader.deployment.env.existing) (eq .Values.reloader.watchGlobally false) (.Values.reloader.enableHA)}}
           env:
          {{- range $name, $value := .Values.reloader.deployment.env.open }}
          {{- if not (empty $value) }}
@@ -77,6 +89,17 @@ spec:
                   key: {{ $name | quote }}
           {{- end }}
           {{- end }}
+          {{- range $secret, $values := .Values.reloader.deployment.env.existing }}
+          {{- range $name, $key := $values }}
+          {{- if not ( empty $name) }}
+          - name: {{ $name | quote }}
+            valueFrom:
+              secretKeyRef:
+                name: {{ $secret | quote }}
+                key: {{ $key | quote }}
+          {{- end }}
+          {{- end }}
+          {{- end }}
           {{- range $name, $value := .Values.reloader.deployment.env.field }}
           {{- if not ( empty $value) }}
           - name: {{ $name | quote }}
@@ -91,6 +114,16 @@ spec:
               fieldRef:
                 fieldPath: metadata.namespace
           {{- end }}
+          {{- if .Values.reloader.enableHA }}
+          - name: POD_NAME
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.name
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.namespace
+          {{- end }}
           {{- end }}
 
           ports:
@@ -98,19 +131,35 @@ spec:
               containerPort: 9090
           livenessProbe:
             httpGet:
-              path: /metrics
+              path: /live
               port: http
             timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }}
+            failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}
+            periodSeconds: {{ .Values.reloader.deployment.livenessProbe.periodSeconds | default "10" }}
+            successThreshold: {{ .Values.reloader.deployment.livenessProbe.successThreshold | default "1" }}
           readinessProbe:
             httpGet:
               path: /metrics
               port: http
             timeoutSeconds: {{ .Values.reloader.deployment.readinessProbe.timeoutSeconds | default "5" }}
+            failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }}
+            periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}
+            successThreshold: {{ .Values.reloader.deployment.readinessProbe.successThreshold | default "1" }}
 
+          {{- $containerSecurityContext := .Values.reloader.deployment.containerSecurityContext | default dict }}
+          {{- if .Values.reloader.readOnlyRootFileSystem }}
+          {{- $_ := set $containerSecurityContext "readOnlyRootFilesystem" true }}
+          {{- end }}
+          securityContext:
+            {{- toYaml $containerSecurityContext | nindent 10 }}
+
           {{- if eq .Values.reloader.readOnlyRootFileSystem true }}
           volumeMounts:
             - mountPath: /tmp/
               name: tmp-volume
           {{- end }}
-          {{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) }}
+          {{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA)}}
           args:
             {{- if .Values.reloader.logFormat }}
             - "--log-format={{ .Values.reloader.logFormat }}"
@@ -124,7 +173,9 @@ spec:
             {{- if .Values.reloader.ignoreNamespaces }}
             - "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
             {{- end }}
-
+            {{- if .Values.reloader.namespaceSelector }}
+            - "--namespace-selector={{ .Values.reloader.namespaceSelector }}"
+            {{- end }}
             {{- if .Values.reloader.custom_annotations }}
             {{- if .Values.reloader.custom_annotations.configmap }}
             - "--configmap-annotation"
@@ -150,6 +201,18 @@ spec:
             {{- if eq .Values.reloader.isArgoRollouts true }}
             - "--is-Argo-Rollouts={{ .Values.reloader.isArgoRollouts }}"
             {{- end }}
+            {{- if eq .Values.reloader.reloadOnCreate true }}
+            - "--reload-on-create={{ .Values.reloader.reloadOnCreate }}"
+            {{- end }}
+            {{- if eq .Values.reloader.syncAfterRestart true }}
+            - "--sync-after-restart={{ .Values.reloader.syncAfterRestart }}"
+            {{- end }}
+            {{- if ne .Values.reloader.reloadStrategy "default" }}
+            - "--reload-strategy={{ .Values.reloader.reloadStrategy }}"
+            {{- end }}
+            {{- if or (gt .Values.reloader.deployment.replicas 1.0) (.Values.reloader.enableHA) }}
+            - "--enable-ha=true"
+            {{- end}}
           {{- end }}
           {{- if .Values.reloader.deployment.resources }}
           resources:
@@ -159,6 +222,9 @@ spec:
       securityContext: {{ toYaml .Values.reloader.deployment.securityContext | nindent 8 }}
       {{- end }}
       serviceAccountName: {{ template "reloader-serviceAccountName" . }}
+      {{- if hasKey .Values.reloader.deployment "automountServiceAccountToken" }}
+      automountServiceAccountToken: {{ .Values.reloader.deployment.automountServiceAccountToken }}
+      {{- end }}
       {{- if eq .Values.reloader.readOnlyRootFileSystem true }}
       volumes:
         - emptyDir: {}
deployments/kubernetes/chart/reloader/templates/poddisruptionbudget.yaml (new file, +11)

@@ -0,0 +1,11 @@
+{{- if .Values.reloader.podDisruptionBudget.enabled }}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ template "reloader-fullname" . }}
+spec:
+  minAvailable: {{ .Values.reloader.podDisruptionBudget.minAvailable }}
+  selector:
+    matchLabels:
+      app: {{ template "reloader-fullname" . }}
+{{- end }}
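A sketch of values that would render this new template (a PDB with minAvailable: 1 only makes sense alongside more than one replica, i.e. together with enableHA):

```sh
helm template reloader deployments/kubernetes/chart/reloader \
  --set reloader.enableHA=true \
  --set reloader.deployment.replicas=2 \
  --set reloader.podDisruptionBudget.enabled=true \
  --set reloader.podDisruptionBudget.minAvailable=1
```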
deployments/kubernetes/chart/reloader/templates/podmonitor.yaml

@@ -1,26 +1,53 @@
-{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.podMonitor.enabled ) }}
+{{- if ( .Values.reloader.podMonitor.enabled ) }}
 apiVersion: monitoring.coreos.com/v1
 kind: PodMonitor
 metadata:
+  {{- if .Values.reloader.podMonitor.annotations }}
+  annotations:
+{{ tpl (toYaml .Values.reloader.podMonitor.annotations) . | indent 4 }}
+  {{- end }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
   {{- if .Values.reloader.podMonitor.labels }}
-{{ toYaml .Values.reloader.podMonitor.labels | indent 4}}
+{{ tpl (toYaml .Values.reloader.podMonitor.labels) . | indent 4 }}
   {{- end }}
   name: {{ template "reloader-fullname" . }}
   {{- if .Values.reloader.podMonitor.namespace }}
-  namespace: {{ .Values.reloader.podMonitor.namespace }}
+  namespace: {{ tpl .Values.reloader.podMonitor.namespace . }}
   {{- end }}
 spec:
   podMetricsEndpoints:
     - port: http
       path: "/metrics"
-      {{- if .Values.reloader.podMonitor.interval }}
-      interval: {{ .Values.reloader.podMonitor.interval }}
-      {{- end }}
-      {{- if .Values.reloader.podMonitor.timeout }}
-      scrapeTimeout: {{ .Values.reloader.podMonitor.timeout }}
-      {{- end }}
+      {{- with .Values.reloader.podMonitor.interval }}
+      interval: {{ . }}
+      {{- end }}
+      {{- with .Values.reloader.podMonitor.scheme }}
+      scheme: {{ . }}
+      {{- end }}
+      {{- with .Values.reloader.podMonitor.bearerTokenSecret }}
+      bearerTokenSecret: {{ . }}
+      {{- end }}
+      {{- with .Values.reloader.podMonitor.tlsConfig }}
+      tlsConfig:
+        {{- toYaml .| nindent 6 }}
+      {{- end }}
+      {{- with .Values.reloader.podMonitor.timeout }}
+      scrapeTimeout: {{ . }}
+      {{- end }}
+      honorLabels: {{ .Values.reloader.podMonitor.honorLabels }}
+      {{- with .Values.reloader.podMonitor.metricRelabelings }}
+      metricRelabelings:
+        {{- tpl (toYaml . | nindent 6) $ }}
+      {{- end }}
+      {{- with .Values.reloader.podMonitor.relabelings }}
+      relabelings:
+        {{- toYaml . | nindent 6 }}
+      {{- end }}
+  {{- with .Values.reloader.podMonitor.podTargetLabels }}
+  podTargetLabels:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
   jobLabel: {{ template "reloader-fullname" . }}
   namespaceSelector:
     matchNames:
deployments/kubernetes/chart/reloader/templates/role.yaml

@@ -17,7 +17,7 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 rules:
   - apiGroups:
       - ""
@@ -32,7 +32,7 @@ rules:
      - list
      - get
      - watch
-{{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
+{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
   - apiGroups:
       - "apps.openshift.io"
       - ""
@@ -44,7 +44,7 @@ rules:
      - update
      - patch
 {{- end }}
-{{- if or (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
+{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
   - apiGroups:
       - "argoproj.io"
       - ""
@@ -77,4 +77,21 @@ rules:
      - get
      - update
      - patch
+{{- if .Values.reloader.enableHA }}
+  - apiGroups:
+      - "coordination.k8s.io"
+    resources:
+      - leases
+    verbs:
+      - create
+      - get
+      - update
+{{- end}}
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
 {{- end }}
deployments/kubernetes/chart/reloader/templates/rolebinding.yaml

@@ -17,7 +17,7 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}-role-binding
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -25,5 +25,5 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: {{ template "reloader-serviceAccountName" . }}
-    namespace: {{ .Release.Namespace }}
+    namespace: {{ .Values.namespace | default .Release.Namespace }}
 {{- end }}
deployments/kubernetes/chart/reloader/templates/secret.yaml (new file, +21)

@@ -0,0 +1,21 @@
+{{- if .Values.reloader.deployment.env.secret -}}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "reloader-fullname" . }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
+type: Opaque
+data:
+{{ if .Values.reloader.deployment.env.secret.ALERT_ON_RELOAD -}}
+  ALERT_ON_RELOAD: {{ .Values.reloader.deployment.env.secret.ALERT_ON_RELOAD | b64enc | quote }}
+{{ end }}
+{{- if .Values.reloader.deployment.env.secret.ALERT_SINK -}}
+  ALERT_SINK: {{ .Values.reloader.deployment.env.secret.ALERT_SINK | b64enc | quote }}
+{{ end }}
+{{- if .Values.reloader.deployment.env.secret.ALERT_WEBHOOK_URL -}}
+  ALERT_WEBHOOK_URL: {{ .Values.reloader.deployment.env.secret.ALERT_WEBHOOK_URL | b64enc | quote }}
+{{ end }}
+{{- if .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO -}}
+  ALERT_ADDITIONAL_INFO: {{ .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO | b64enc | quote }}
+{{ end }}
+{{ end }}
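A sketch of values that populate this new Secret (the webhook URL is a placeholder; the keys match the template above, and `-f -` reads values from stdin):

```sh
helm upgrade --install reloader deployments/kubernetes/chart/reloader -f - <<'EOF'
reloader:
  deployment:
    env:
      secret:
        ALERT_ON_RELOAD: "true"
        ALERT_SINK: "slack"
        ALERT_WEBHOOK_URL: "https://hooks.example.com/reloader"  # placeholder
EOF
```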
deployments/kubernetes/chart/reloader/templates/service.yaml

@@ -13,6 +13,7 @@ metadata:
 {{ toYaml .Values.reloader.service.labels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-fullname" . }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 spec:
   selector:
 {{- if .Values.reloader.deployment.labels }}
deployments/kubernetes/chart/reloader/templates/serviceaccount.yaml

@@ -4,6 +4,9 @@ kind: ServiceAccount
 {{- if .Values.global.imagePullSecrets }}
 imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
 {{- end }}
+{{- if hasKey .Values.reloader.serviceAccount "automountServiceAccountToken" }}
+automountServiceAccountToken: {{ .Values.reloader.serviceAccount.automountServiceAccountToken }}
+{{- end }}
 metadata:
   annotations:
 {{ include "reloader-helm3.annotations" . | indent 4 }}
@@ -19,4 +22,5 @@ metadata:
 {{ toYaml .Values.reloader.matchLabels | indent 4 }}
 {{- end }}
   name: {{ template "reloader-serviceAccountName" . }}
+  namespace: {{ .Values.namespace | default .Release.Namespace }}
 {{- end }}
deployments/kubernetes/chart/reloader/templates/servicemonitor.yaml

@@ -2,25 +2,52 @@
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
+  {{- if .Values.reloader.serviceMonitor.annotations }}
+  annotations:
+{{ tpl (toYaml .Values.reloader.serviceMonitor.annotations) . | indent 4 }}
+  {{- end }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
   {{- if .Values.reloader.serviceMonitor.labels }}
-{{ toYaml .Values.reloader.serviceMonitor.labels | indent 4}}
+{{ tpl (toYaml .Values.reloader.serviceMonitor.labels) . | indent 4 }}
   {{- end }}
   name: {{ template "reloader-fullname" . }}
   {{- if .Values.reloader.serviceMonitor.namespace }}
-  namespace: {{ .Values.reloader.serviceMonitor.namespace }}
+  namespace: {{ tpl .Values.reloader.serviceMonitor.namespace . }}
   {{- end }}
 spec:
   endpoints:
     - targetPort: http
       path: "/metrics"
-      {{- if .Values.reloader.serviceMonitor.interval }}
-      interval: {{ .Values.reloader.serviceMonitor.interval }}
-      {{- end }}
-      {{- if .Values.reloader.serviceMonitor.timeout }}
-      scrapeTimeout: {{ .Values.reloader.serviceMonitor.timeout }}
-      {{- end }}
+      {{- with .Values.reloader.serviceMonitor.interval }}
+      interval: {{ . }}
+      {{- end }}
+      {{- with .Values.reloader.serviceMonitor.scheme }}
+      scheme: {{ . }}
+      {{- end }}
+      {{- with .Values.reloader.serviceMonitor.bearerTokenFile }}
+      bearerTokenFile: {{ . }}
+      {{- end }}
+      {{- with .Values.reloader.serviceMonitor.tlsConfig }}
+      tlsConfig:
+        {{- toYaml .| nindent 6 }}
+      {{- end }}
+      {{- with .Values.reloader.serviceMonitor.timeout }}
+      scrapeTimeout: {{ . }}
+      {{- end }}
+      honorLabels: {{ .Values.reloader.serviceMonitor.honorLabels }}
+      {{- with .Values.reloader.serviceMonitor.metricRelabelings }}
+      metricRelabelings:
+        {{- tpl (toYaml . | nindent 6) $ }}
+      {{- end }}
+      {{- with .Values.reloader.serviceMonitor.relabelings }}
+      relabelings:
+        {{- toYaml . | nindent 6 }}
+      {{- end }}
+  {{- with .Values.reloader.serviceMonitor.targetLabels }}
+  targetLabels:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
   jobLabel: {{ template "reloader-fullname" . }}
   namespaceSelector:
     matchNames:
Helm unittest suite for deployment.yaml (new file, +50; original path not preserved in this mirror)

@@ -0,0 +1,50 @@
+suite: Deployment
+
+templates:
+  - deployment.yaml
+
+tests:
+  - it: sets readOnlyRootFilesystem in container securityContext when reloader.readOnlyRootFileSystem is true
+    set:
+      reloader:
+        readOnlyRootFileSystem: true
+        deployment:
+          containerSecurityContext:
+            readOnlyRootFilesystem: false
+    asserts:
+      - equal:
+          path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
+          value: true
+
+  - it: sets readOnlyRootFilesystem in container securityContext even if reloader.deployment.containerSecurityContext is null
+    set:
+      reloader:
+        readOnlyRootFileSystem: true
+        deployment:
+          containerSecurityContext: null
+    asserts:
+      - equal:
+          path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
+          value: true
+
+  - it: does not override readOnlyRootFilesystem in container securityContext based on reloader.readOnlyRootFileSystem
+    set:
+      reloader:
+        readOnlyRootFileSystem: false
+        deployment:
+          containerSecurityContext:
+            readOnlyRootFilesystem: true
+    asserts:
+      - equal:
+          path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
+          value: true
+
+  - it: template is still valid with no defined containerSecurityContext
+    set:
+      reloader:
+        readOnlyRootFileSystem: false
+        deployment:
+          containerSecurityContext: null
+    asserts:
+      - isEmpty:
+          path: spec.template.spec.containers[0].securityContext
deployments/kubernetes/chart/reloader/values.schema.json (new file, +19)

@@ -0,0 +1,19 @@
+{
+  "$schema": "http://json-schema.org/schema#",
+  "type": "object",
+  "properties": {
+    "reloader": {
+      "type": "object",
+      "properties": {
+        "reloadStrategy": {
+          "type": "string",
+          "enum": [
+            "default",
+            "env-vars",
+            "annotations"
+          ]
+        }
+      }
+    }
+  }
+}
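With the schema in place, an out-of-enum value should fail rendering. A sketch (error text abridged from Helm's schema-validation failure):

```sh
helm template reloader deployments/kubernetes/chart/reloader \
  --set reloader.reloadStrategy=not-a-strategy
# Error: values don't meet the specifications of the schema(s) ...
```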
deployments/kubernetes/chart/reloader/values.yaml

@@ -8,20 +8,31 @@ global:
 kubernetes:
   host: https://kubernetes.default
 
+nameOverride: ""
+fullnameOverride: ""
+
 reloader:
   isArgoRollouts: false
   isOpenshift: false
   ignoreSecrets: false
   ignoreConfigMaps: false
   reloadOnCreate: false
+  syncAfterRestart: false
+  reloadStrategy: default # Set to default, env-vars or annotations
   ignoreNamespaces: "" # Comma separated list of namespaces to ignore
+  namespaceSelector: "" # Comma separated list of 'key:value' labels for namespaces selection
   logFormat: "" #json
   watchGlobally: true
+  # Set to true to enable leadership election allowing you to run multiple replicas
+  enableHA: false
+  # Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
+  readOnlyRootFileSystem: false
   legacy:
     rbac: false
   matchLabels: {}
   deployment:
+    # If you wish to run multiple replicas set reloader.enableHA = true
     replicas: 1
     nodeSelector:
       # cloud.google.com/gke-nodepool: default-pool
@@ -40,6 +51,13 @@ reloader:
       runAsNonRoot: true
       runAsUser: 65534
 
+    containerSecurityContext: {}
+      # capabilities:
+      #   drop:
+      #     - ALL
+      # allowPrivilegeEscalation: false
+      # readOnlyRootFilesystem: true
+
     # A list of tolerations to be applied to the Deployment.
     # Example:
     # tolerations:
@@ -52,10 +70,10 @@ reloader:
     labels:
       provider: stakater
       group: com.stakater.platform
-      version: v0.0.92
+      version: v1.0.13
     image:
       name: stakater/reloader
-      tag: v0.0.92
+      tag: v1.0.13
       pullPolicy: IfNotPresent
     # Support for extra environment variables.
     env:
@@ -63,8 +81,33 @@ reloader:
       open:
       # secret supports Key value pair as environment variables. It gets the values based on keys from default reloader secret if any.
       secret:
+        # ALERT_ON_RELOAD: <"true"|"false">
+        # ALERT_SINK: <"slack"> # By default it will be a raw text based webhook
+        # ALERT_WEBHOOK_URL: <"webhook_url">
+        # ALERT_ADDITIONAL_INFO: <"Additional Info like Cluster Name if needed">
       # field supports Key value pair as environment variables. It gets the values from other fields of pod.
       field:
+      # existing secret, you can specify multiple existing secrets, for each
+      # specify the env var name followed by the key in existing secret that
+      # will be used to populate the env var
+      existing:
+        # existing_secret_name:
+        #   ALERT_ON_RELOAD: alert_on_reload_key
+        #   ALERT_SINK: alert_sink_key
+        #   ALERT_WEBHOOK_URL: alert_webhook_key
+        #   ALERT_ADDITIONAL_INFO: alert_additional_info_key
+
+    # Liveness and readiness probe timeout values.
+    livenessProbe: {}
+      # timeoutSeconds: 5
+      # failureThreshold: 5
+      # periodSeconds: 10
+      # successThreshold: 1
+    readinessProbe: {}
+      # timeoutSeconds: 15
+      # failureThreshold: 5
+      # periodSeconds: 10
+      # successThreshold: 1
 
     # Specify resource requests/limits for the deployment.
     # Example:
@@ -79,6 +122,8 @@ reloader:
     pod:
       annotations: {}
     priorityClassName: ""
+    # imagePullSecrets:
+    #   - name: myregistrykey
 
     service: {}
       # labels: {}
@@ -103,26 +148,110 @@ reloader:
       # configmap: "my.company.com/configmap"
       # secret: "my.company.com/secret"
   custom_annotations: {}
 
   serviceMonitor:
-    # enabling this requires service to be enabled as well, or no endpoints will be found
+    # Deprecated: Service monitor will be removed in future releases of reloader in favour of Pod monitor
+    # Enabling this requires service to be enabled as well, or no endpoints will be found
     enabled: false
     # Set the namespace the ServiceMonitor should be deployed
     # namespace: monitoring
-    # Set how frequently Prometheus should scrape
-    # interval: 30s
     # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
     # labels:
-    # Set timeout for scrape
-    # timeout: 10s
+
+    # Fallback to the prometheus default unless specified
+    # interval: 10s
+
+    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
+    # scheme: ""
+
+    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
+    ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
+    # tlsConfig: {}
+
+    # bearerTokenFile:
+    # Fallback to the prometheus default unless specified
+    # timeout: 30s
+
+    ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+    labels: {}
+
+    ## Used to pass annotations that are used by the Prometheus installed in your cluster to select Service Monitors to work with
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+    annotations: {}
+
+    # Retain the job and instance labels of the metrics pushed to the Pushgateway
+    # [Scraping Pushgateway](https://github.com/prometheus/pushgateway#configure-the-pushgateway-as-a-target-to-scrape)
+    honorLabels: true
+
+    ## Metric relabel configs to apply to samples before ingestion.
+    ## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
+    metricRelabelings: []
+    # - action: keep
+    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+    #   sourceLabels: [__name__]
+
+    ## Relabel configs to apply to samples before ingestion.
+    ## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
+    relabelings: []
+    # - sourceLabels: [__meta_kubernetes_pod_node_name]
+    #   separator: ;
+    #   regex: ^(.*)$
+    #   targetLabel: nodename
+    #   replacement: $1
+    #   action: replace
+
+    targetLabels: []
 
   podMonitor:
     # enabling this requires service to be enabled as well, or no endpoints will be found
     enabled: false
     # Set the namespace the podMonitor should be deployed
     # namespace: monitoring
-    # Set how frequently Prometheus should scrape
-    # interval: 30s
     # Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
     # labels:
-    # Set timeout for scrape
-    # timeout: 10s
+
+    # Fallback to the prometheus default unless specified
+    # interval: 10s
+
+    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
+    # scheme: ""
+
+    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
+    ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
+    # tlsConfig: {}
+
+    # bearerTokenSecret:
+    # Fallback to the prometheus default unless specified
+    # timeout: 30s
+
+    ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+    labels: {}
+
+    ## Used to pass annotations that are used by the Prometheus installed in your cluster to select Service Monitors to work with
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+    annotations: {}
+
+    # Retain the job and instance labels of the metrics pushed to the Pushgateway
+    # [Scraping Pushgateway](https://github.com/prometheus/pushgateway#configure-the-pushgateway-as-a-target-to-scrape)
+    honorLabels: true
+
+    ## Metric relabel configs to apply to samples before ingestion.
+    ## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
+    metricRelabelings: []
+    # - action: keep
+    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+    #   sourceLabels: [__name__]
+
+    ## Relabel configs to apply to samples before ingestion.
+    ## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
+    relabelings: []
+    # - sourceLabels: [__meta_kubernetes_pod_node_name]
+    #   separator: ;
+    #   regex: ^(.*)$
+    #   targetLabel: nodename
+    #   replacement: $1
+    #   action: replace
+
+    podTargetLabels: []
+
+  podDisruptionBudget:
+    enabled: false
+    # Set the minimum available replicas
+    # minAvailable: 1
@@ -4,7 +4,5 @@ kind: Kustomization
resources:
- manifests/clusterrole.yaml
- manifests/clusterrolebinding.yaml
- manifests/role.yaml
- manifests/rolebinding.yaml
- manifests/serviceaccount.yaml
- manifests/deployment.yaml
@@ -1,7 +1,7 @@
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
annotations:
@@ -9,12 +9,11 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v1.0.13"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role
namespace: default
rules:
- apiGroups:
- ""
@@ -46,4 +45,10 @@ rules:
- get
- update
- patch

- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
@@ -1,7 +1,7 @@
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
@@ -9,12 +9,11 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v1.0.13"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -23,4 +22,3 @@ subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: default
@@ -8,15 +8,15 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v1.0.13"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.77
version: v1.0.13
name: reloader-reloader
namespace: default
spec:
replicas: 1
revisionHistoryLimit: 2
@@ -28,17 +28,16 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v1.0.13"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.77
version: v1.0.13
spec:
containers:
- image: "stakater/reloader:v0.0.77"
- image: "stakater/reloader:v1.0.13"
imagePullPolicy: IfNotPresent
name: reloader-reloader
@@ -47,15 +46,24 @@ spec:
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
path: /live
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1

securityContext:
{}
securityContext:
runAsNonRoot: true
runAsUser: 65534

serviceAccountName: reloader-reloader
@@ -1,3 +0,0 @@
---
# Source: reloader/templates/podmonitor.yaml

@@ -1,4 +0,0 @@
---
# Source: reloader/templates/role.yaml

@@ -1,4 +0,0 @@
---
# Source: reloader/templates/rolebinding.yaml

@@ -1,4 +0,0 @@
---
# Source: reloader/templates/service.yaml
@@ -1,6 +1,5 @@
---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
@@ -9,9 +8,9 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v1.0.13"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader
namespace: default

@@ -1,4 +0,0 @@
---
# Source: reloader/templates/servicemonitor.yaml
@@ -1,7 +1,23 @@
---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v1.0.13"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader
namespace: default
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
annotations:
@@ -9,12 +25,11 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v1.0.13"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role
namespace: default
rules:
- apiGroups:
- ""
@@ -46,11 +61,17 @@ rules:
- get
- update
- patch

- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
annotations:
@@ -58,12 +79,11 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v1.0.13"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -72,7 +92,6 @@ subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: default

---
# Source: reloader/templates/deployment.yaml
apiVersion: apps/v1
@@ -83,15 +102,15 @@ metadata:
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v1.0.13"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.77
version: v1.0.13
name: reloader-reloader
namespace: default
spec:
replicas: 1
revisionHistoryLimit: 2
@@ -103,17 +122,16 @@ spec:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
chart: "reloader-v1.0.13"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.77
version: v1.0.13
spec:
containers:
- image: "stakater/reloader:v0.0.77"
- image: "stakater/reloader:v1.0.13"
imagePullPolicy: IfNotPresent
name: reloader-reloader
@@ -122,48 +140,24 @@ spec:
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
path: /live
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1

securityContext:
{}
securityContext:
runAsNonRoot: true
runAsUser: 65534

serviceAccountName: reloader-reloader

---
# Source: reloader/templates/role.yaml

---
# Source: reloader/templates/rolebinding.yaml

---
# Source: reloader/templates/service.yaml

---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.77"
release: "reloader"
heritage: "Tiller"
app.kubernetes.io/managed-by: "Tiller"
name: reloader-reloader

---
# Source: reloader/templates/servicemonitor.yaml
@@ -13,6 +13,8 @@ reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
reloadOnCreate: false
reloadStrategy: default # Set to default, env-vars or annotations
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" # json
watchGlobally: true
@@ -22,6 +24,7 @@ reloader:
rbac: false
matchLabels: {}
deployment:
replicas: 1
nodeSelector:
# cloud.google.com/gke-nodepool: default-pool

@@ -40,6 +43,13 @@ reloader:
runAsNonRoot: true
runAsUser: 65534

containerSecurityContext: {}
# capabilities:
#   drop:
#     - ALL
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: true

# A list of tolerations to be applied to the Deployment.
# Example:
# tolerations:
docs/Alerting.md (new file, 12 lines)
@@ -0,0 +1,12 @@
# Alerting on Reload

Reloader can send an alert whenever it triggers a rolling upgrade on a Deployment or StatefulSet. The alert is delivered as a webhook notification to the configured webhook server, with all the relevant information included.

#### Enabling the feature

To enable this feature, update the `reloader.env.secret` section of `values.yaml` with the information needed for the alert:

<pre>
ALERT_ON_RELOAD: [ true/false ] Default: false
ALERT_SINK: [ slack/webhook ] Default: webhook
ALERT_WEBHOOK_URL: Required if ALERT_ON_RELOAD is true
ALERT_ADDITIONAL_INFO: Any additional information to be added to the alert
</pre>

#### Slack incoming-webhook creation docs

https://api.slack.com/messaging/webhooks
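For instance, a values fragment enabling Slack alerts might look like the sketch below. This is illustrative only: the webhook URL and the additional-info string are placeholders, and the exact nesting under `reloader.env.secret` should be checked against the chart's values file.

```yaml
reloader:
  env:
    secret:
      ALERT_ON_RELOAD: "true"
      ALERT_SINK: "slack"
      # Placeholder URL; use the incoming webhook created for your workspace.
      ALERT_WEBHOOK_URL: "https://hooks.slack.com/services/T0000/B0000/XXXXXXXX"
      # Optional extra context prepended to every alert message.
      ALERT_ADDITIONAL_INFO: "cluster: dev-cluster"
```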
go.mod (119 lines)
@@ -1,44 +1,91 @@
module github.com/stakater/Reloader

go 1.15
go 1.20

require (
	github.com/argoproj/argo-rollouts v1.0.1
	github.com/onsi/ginkgo v1.15.1 // indirect
	github.com/onsi/gomega v1.11.0 // indirect
	github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
	github.com/argoproj/argo-rollouts v1.4.1
	github.com/openshift/api v3.9.0+incompatible
	github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
	github.com/prometheus/client_golang v1.10.0
	github.com/sirupsen/logrus v1.7.0
	github.com/spf13/cobra v1.1.3
	k8s.io/api v0.21.1
	k8s.io/apimachinery v0.21.1
	k8s.io/client-go v0.21.1
	github.com/parnurzeal/gorequest v0.2.16
	github.com/prometheus/client_golang v1.14.0
	github.com/sirupsen/logrus v1.9.0
	github.com/spf13/cobra v1.6.1
	k8s.io/api v0.26.2
	k8s.io/apimachinery v0.26.2
	k8s.io/client-go v0.26.2
	k8s.io/kubectl v0.26.2
)

replace (
	k8s.io/api => k8s.io/api v0.20.4
	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.4
	k8s.io/apimachinery => k8s.io/apimachinery v0.21.0-alpha.0
	k8s.io/apiserver => k8s.io/apiserver v0.20.4
	k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.4
	k8s.io/client-go => k8s.io/client-go v0.20.4
	k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.4
	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.4
	k8s.io/code-generator => k8s.io/code-generator v0.20.5-rc.0
	k8s.io/component-base => k8s.io/component-base v0.20.4
	k8s.io/component-helpers => k8s.io/component-helpers v0.20.4
	k8s.io/controller-manager => k8s.io/controller-manager v0.20.4
	k8s.io/cri-api => k8s.io/cri-api v0.20.5-rc.0
	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.4
	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.4
	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.4
	k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.4
	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.4
	k8s.io/kubectl => k8s.io/kubectl v0.20.4
	k8s.io/kubelet => k8s.io/kubelet v0.20.4
	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.4
	k8s.io/metrics => k8s.io/metrics v0.20.4
	k8s.io/mount-utils => k8s.io/mount-utils v0.20.5-rc.0
	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.4
require (
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
	github.com/go-logr/logr v1.2.3 // indirect
	github.com/go-openapi/jsonpointer v0.19.6 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.22.3 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/google/gnostic v0.6.9 // indirect
	github.com/google/go-cmp v0.5.9 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/imdario/mergo v0.3.13 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/prometheus/client_model v0.3.0 // indirect
	github.com/prometheus/common v0.39.0 // indirect
	github.com/prometheus/procfs v0.9.0 // indirect
	github.com/smartystreets/goconvey v1.7.2 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	golang.org/x/net v0.7.0 // indirect
	golang.org/x/oauth2 v0.4.0 // indirect
	golang.org/x/sys v0.5.0 // indirect
	golang.org/x/term v0.5.0 // indirect
	golang.org/x/text v0.7.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/protobuf v1.28.1 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/klog/v2 v2.80.1 // indirect
	k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 // indirect
	k8s.io/utils v0.0.0-20230202215443-34013725500c // indirect
	moul.io/http2curl v1.0.0 // indirect
	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
	sigs.k8s.io/yaml v1.3.0 // indirect
)

// Replacements for argo-rollouts
replace (
	github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127
	k8s.io/api v0.0.0 => k8s.io/api v0.26.1
	k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.26.1
	k8s.io/client-go v0.0.0 => k8s.io/client-go v0.26.1
	k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.24.2
	k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.24.2
	k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.20.5-rc.0
	k8s.io/csi-translation-lib v0.0.0 => k8s.io/csi-translation-lib v0.24.2
	k8s.io/kube-aggregator v0.0.0 => k8s.io/kube-aggregator v0.24.2
	k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.24.2
	k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.24.2
	k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.24.2
	k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.26.1
	k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.2
	k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.24.2
	k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.20.5-rc.0
	k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.24.2
	k8s.io/sample-cli-plugin v0.0.0 => k8s.io/sample-cli-plugin v0.24.2
	k8s.io/sample-controller v0.0.0 => k8s.io/sample-controller v0.24.2
)
internal/pkg/alerts/alert.go (new file, 94 lines)
@@ -0,0 +1,94 @@
package alert

import (
	"fmt"
	"os"
	"strings"

	"github.com/parnurzeal/gorequest"
	"github.com/sirupsen/logrus"
)

// function to send alert msg to webhook service
func SendWebhookAlert(msg string) {
	webhook_url, ok := os.LookupEnv("ALERT_WEBHOOK_URL")
	if !ok {
		logrus.Error("ALERT_WEBHOOK_URL env variable not provided")
		return
	}
	webhook_url = strings.TrimSpace(webhook_url)
	alert_sink := os.Getenv("ALERT_SINK")
	alert_sink = strings.ToLower(strings.TrimSpace(alert_sink))

	// Provision to add Proxy to reach webhook server if required
	webhook_proxy := os.Getenv("ALERT_WEBHOOK_PROXY")
	webhook_proxy = strings.TrimSpace(webhook_proxy)

	// Provision to add Additional information in the alert. e.g ClusterName
	alert_additional_info, ok := os.LookupEnv("ALERT_ADDITIONAL_INFO")
	if ok {
		alert_additional_info = strings.TrimSpace(alert_additional_info)
		msg = fmt.Sprintf("%s : %s", alert_additional_info, msg)
	}

	if alert_sink == "slack" {
		sendSlackAlert(webhook_url, webhook_proxy, msg)
	} else {
		msg = strings.Replace(msg, "*", "", -1)
		sendRawWebhookAlert(webhook_url, webhook_proxy, msg)
	}
}

// function to handle server redirection
func redirectPolicy(req gorequest.Request, via []gorequest.Request) error {
	return fmt.Errorf("incorrect token (redirection)")
}

// function to send alert to slack
func sendSlackAlert(webhookUrl string, proxy string, msg string) []error {
	attachment := Attachment{
		Text:       msg,
		Color:      "good",
		AuthorName: "Reloader",
	}

	payload := WebhookMessage{
		Attachments: []Attachment{attachment},
	}

	request := gorequest.New().Proxy(proxy)
	resp, _, err := request.
		Post(webhookUrl).
		RedirectPolicy(redirectPolicy).
		Send(payload).
		End()

	if err != nil {
		return err
	}
	if resp.StatusCode >= 400 {
		return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
	}

	return nil
}

// function to send alert to webhook service as text
func sendRawWebhookAlert(webhookUrl string, proxy string, msg string) []error {
	request := gorequest.New().Proxy(proxy)
	resp, _, err := request.
		Post(webhookUrl).
		Type("text").
		RedirectPolicy(redirectPolicy).
		Send(msg).
		End()

	if err != nil {
		return err
	}
	if resp.StatusCode >= 400 {
		return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
	}

	return nil
}
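As a rough illustration of the contract above, the hypothetical snippet below sets the environment variables SendWebhookAlert reads and invokes it. It is not part of the change set: it only compiles inside the Reloader module (the alerts package is internal), and the webhook URL is a placeholder; in a cluster these variables come from the chart's reloader.env.secret values.

```go
package main

import (
	"os"

	alert "github.com/stakater/Reloader/internal/pkg/alerts"
)

func main() {
	// Placeholder webhook URL; ALERT_SINK falls back to the raw webhook path when unset.
	os.Setenv("ALERT_WEBHOOK_URL", "https://hooks.slack.com/services/T0000/B0000/XXXXXXXX")
	os.Setenv("ALERT_SINK", "slack")
	// Optional context that SendWebhookAlert prepends to the message.
	os.Setenv("ALERT_ADDITIONAL_INFO", "cluster: dev")

	// Sends a Slack-formatted attachment via the webhook configured above.
	alert.SendWebhookAlert("Changes detected in *my-configmap*; rolling upgrade triggered")
}
```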
internal/pkg/alerts/slack_alert.go (new file, 61 lines)
@@ -0,0 +1,61 @@
package alert

type WebhookMessage struct {
	Username        string       `json:"username,omitempty"`
	IconEmoji       string       `json:"icon_emoji,omitempty"`
	IconURL         string       `json:"icon_url,omitempty"`
	Channel         string       `json:"channel,omitempty"`
	ThreadTimestamp string       `json:"thread_ts,omitempty"`
	Text            string       `json:"text,omitempty"`
	Attachments     []Attachment `json:"attachments,omitempty"`
	Parse           string       `json:"parse,omitempty"`
	ResponseType    string       `json:"response_type,omitempty"`
	ReplaceOriginal bool         `json:"replace_original,omitempty"`
	DeleteOriginal  bool         `json:"delete_original,omitempty"`
	ReplyBroadcast  bool         `json:"reply_broadcast,omitempty"`
}

type Attachment struct {
	Color    string `json:"color,omitempty"`
	Fallback string `json:"fallback,omitempty"`

	CallbackID string `json:"callback_id,omitempty"`
	ID         int    `json:"id,omitempty"`

	AuthorID      string `json:"author_id,omitempty"`
	AuthorName    string `json:"author_name,omitempty"`
	AuthorSubname string `json:"author_subname,omitempty"`
	AuthorLink    string `json:"author_link,omitempty"`
	AuthorIcon    string `json:"author_icon,omitempty"`

	Title     string `json:"title,omitempty"`
	TitleLink string `json:"title_link,omitempty"`
	Pretext   string `json:"pretext,omitempty"`
	Text      string `json:"text,omitempty"`

	ImageURL string `json:"image_url,omitempty"`
	ThumbURL string `json:"thumb_url,omitempty"`

	ServiceName string `json:"service_name,omitempty"`
	ServiceIcon string `json:"service_icon,omitempty"`
	FromURL     string `json:"from_url,omitempty"`
	OriginalURL string `json:"original_url,omitempty"`

	MarkdownIn []string `json:"mrkdwn_in,omitempty"`

	Footer     string `json:"footer,omitempty"`
	FooterIcon string `json:"footer_icon,omitempty"`
}

type Field struct {
	Title string `json:"title"`
	Value string `json:"value"`
	Short bool   `json:"short"`
}

type Action struct {
	Type  string `json:"type"`
	Text  string `json:"text"`
	Url   string `json:"url"`
	Style string `json:"style"`
}
@@ -2,39 +2,40 @@ package callbacks

import (
	"context"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
	openshiftv1 "github.com/openshift/api/apps/v1"
)

//ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kube.Clients, string) []interface{}
// ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kube.Clients, string) []runtime.Object

//ContainersFunc is a generic func to return containers
type ContainersFunc func(interface{}) []v1.Container
// ContainersFunc is a generic func to return containers
type ContainersFunc func(runtime.Object) []v1.Container

//InitContainersFunc is a generic func to return containers
type InitContainersFunc func(interface{}) []v1.Container
// InitContainersFunc is a generic func to return containers
type InitContainersFunc func(runtime.Object) []v1.Container

//VolumesFunc is a generic func to return volumes
type VolumesFunc func(interface{}) []v1.Volume
// VolumesFunc is a generic func to return volumes
type VolumesFunc func(runtime.Object) []v1.Volume

//UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, interface{}) error
// UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, runtime.Object) error

//AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(interface{}) map[string]string
// AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(runtime.Object) map[string]string

//PodAnnotationsFunc is a generic func to return annotations
type PodAnnotationsFunc func(interface{}) map[string]string
// PodAnnotationsFunc is a generic func to return annotations
type PodAnnotationsFunc func(runtime.Object) map[string]string

//RollingUpgradeFuncs contains generic functions to perform rolling upgrade
// RollingUpgradeFuncs contains generic functions to perform rolling upgrade
type RollingUpgradeFuncs struct {
	ItemsFunc       ItemsFunc
	AnnotationsFunc AnnotationsFunc
@@ -47,209 +48,260 @@ type RollingUpgradeFuncs struct {
}

// GetDeploymentItems returns the deployments in given namespace
func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
func GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object {
	deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list deployments %v", err)
	}
	return util.InterfaceSlice(deployments.Items)

	items := make([]runtime.Object, len(deployments.Items))
	// Ensure we always have pod annotations to add to
	for i, v := range deployments.Items {
		if v.Spec.Template.ObjectMeta.Annotations == nil {
			annotations := make(map[string]string)
			deployments.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
		}
		items[i] = &deployments.Items[i]
	}

	return items
}

// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
	daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list daemonSets %v", err)
	}
	return util.InterfaceSlice(daemonSets.Items)

	items := make([]runtime.Object, len(daemonSets.Items))
	// Ensure we always have pod annotations to add to
	for i, v := range daemonSets.Items {
		if v.Spec.Template.ObjectMeta.Annotations == nil {
			daemonSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
		}
		items[i] = &daemonSets.Items[i]
	}

	return items
}

// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Object {
	statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list statefulSets %v", err)
	}
	return util.InterfaceSlice(statefulSets.Items)

	items := make([]runtime.Object, len(statefulSets.Items))
	// Ensure we always have pod annotations to add to
	for i, v := range statefulSets.Items {
		if v.Spec.Template.ObjectMeta.Annotations == nil {
			statefulSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
		}
		items[i] = &statefulSets.Items[i]
	}

	return items
}

// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []runtime.Object {
	deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list deploymentConfigs %v", err)
	}
	return util.InterfaceSlice(deploymentConfigs.Items)

	items := make([]runtime.Object, len(deploymentConfigs.Items))
	// Ensure we always have pod annotations to add to
	for i, v := range deploymentConfigs.Items {
		if v.Spec.Template.ObjectMeta.Annotations == nil {
			deploymentConfigs.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
		}
		items[i] = &deploymentConfigs.Items[i]
	}

	return items
}

// GetRolloutItems returns the rollouts in given namespace
func GetRolloutItems(clients kube.Clients, namespace string) []interface{} {
func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
	rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list Rollouts %v", err)
	}
	return util.InterfaceSlice(rollouts.Items)

	items := make([]runtime.Object, len(rollouts.Items))
	// Ensure we always have pod annotations to add to
	for i, v := range rollouts.Items {
		if v.Spec.Template.ObjectMeta.Annotations == nil {
			rollouts.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
		}
		items[i] = &rollouts.Items[i]
	}

	return items
}

// GetDeploymentAnnotations returns the annotations of given deployment
func GetDeploymentAnnotations(item interface{}) map[string]string {
	return item.(appsv1.Deployment).ObjectMeta.Annotations
func GetDeploymentAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.Deployment).ObjectMeta.Annotations
}

// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item interface{}) map[string]string {
	return item.(appsv1.DaemonSet).ObjectMeta.Annotations
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
}

// GetStatefulSetAnnotations returns the annotations of given statefulSet
func GetStatefulSetAnnotations(item interface{}) map[string]string {
	return item.(appsv1.StatefulSet).ObjectMeta.Annotations
func GetStatefulSetAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.StatefulSet).ObjectMeta.Annotations
}

// GetDeploymentConfigAnnotations returns the annotations of given deploymentConfig
func GetDeploymentConfigAnnotations(item interface{}) map[string]string {
	return item.(openshiftv1.DeploymentConfig).ObjectMeta.Annotations
func GetDeploymentConfigAnnotations(item runtime.Object) map[string]string {
	return item.(*openshiftv1.DeploymentConfig).ObjectMeta.Annotations
}

// GetRolloutAnnotations returns the annotations of given rollout
func GetRolloutAnnotations(item interface{}) map[string]string {
	return item.(argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
func GetRolloutAnnotations(item runtime.Object) map[string]string {
	return item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
}

// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
func GetDeploymentPodAnnotations(item interface{}) map[string]string {
	return item.(appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
func GetDeploymentPodAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
}

// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item interface{}) map[string]string {
	return item.(appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
}

// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
func GetStatefulSetPodAnnotations(item interface{}) map[string]string {
	return item.(appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string {
	return item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
}

// GetDeploymentConfigPodAnnotations returns the pod's annotations of given deploymentConfig
func GetDeploymentConfigPodAnnotations(item interface{}) map[string]string {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
func GetDeploymentConfigPodAnnotations(item runtime.Object) map[string]string {
	return item.(*openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
}

// GetRolloutPodAnnotations returns the pod's annotations of given rollout
func GetRolloutPodAnnotations(item interface{}) map[string]string {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
func GetRolloutPodAnnotations(item runtime.Object) map[string]string {
	return item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
}

// GetDeploymentContainers returns the containers of given deployment
func GetDeploymentContainers(item interface{}) []v1.Container {
	return item.(appsv1.Deployment).Spec.Template.Spec.Containers
func GetDeploymentContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.Deployment).Spec.Template.Spec.Containers
}

// GetDaemonSetContainers returns the containers of given daemonSet
func GetDaemonSetContainers(item interface{}) []v1.Container {
	return item.(appsv1.DaemonSet).Spec.Template.Spec.Containers
func GetDaemonSetContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers
}

// GetStatefulSetContainers returns the containers of given statefulSet
func GetStatefulSetContainers(item interface{}) []v1.Container {
	return item.(appsv1.StatefulSet).Spec.Template.Spec.Containers
func GetStatefulSetContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.StatefulSet).Spec.Template.Spec.Containers
}

// GetDeploymentConfigContainers returns the containers of given deploymentConfig
func GetDeploymentConfigContainers(item interface{}) []v1.Container {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
func GetDeploymentConfigContainers(item runtime.Object) []v1.Container {
	return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
}

// GetRolloutContainers returns the containers of given rollout
func GetRolloutContainers(item interface{}) []v1.Container {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
func GetRolloutContainers(item runtime.Object) []v1.Container {
	return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
}

// GetDeploymentInitContainers returns the containers of given deployment
func GetDeploymentInitContainers(item interface{}) []v1.Container {
	return item.(appsv1.Deployment).Spec.Template.Spec.InitContainers
func GetDeploymentInitContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.Deployment).Spec.Template.Spec.InitContainers
}

// GetDaemonSetInitContainers returns the containers of given daemonSet
func GetDaemonSetInitContainers(item interface{}) []v1.Container {
	return item.(appsv1.DaemonSet).Spec.Template.Spec.InitContainers
func GetDaemonSetInitContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers
}

// GetStatefulSetInitContainers returns the containers of given statefulSet
func GetStatefulSetInitContainers(item interface{}) []v1.Container {
	return item.(appsv1.StatefulSet).Spec.Template.Spec.InitContainers
func GetStatefulSetInitContainers(item runtime.Object) []v1.Container {
	return item.(*appsv1.StatefulSet).Spec.Template.Spec.InitContainers
}

// GetDeploymentConfigInitContainers returns the containers of given deploymentConfig
func GetDeploymentConfigInitContainers(item interface{}) []v1.Container {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
func GetDeploymentConfigInitContainers(item runtime.Object) []v1.Container {
	return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
}

// GetRolloutInitContainers returns the containers of given rollout
func GetRolloutInitContainers(item interface{}) []v1.Container {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
func GetRolloutInitContainers(item runtime.Object) []v1.Container {
	return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
}

// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
	deployment := resource.(appsv1.Deployment)
	_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), &deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.Object) error {
	deployment := resource.(*appsv1.Deployment)
	_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
	daemonSet := resource.(appsv1.DaemonSet)
	_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), &daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
	daemonSet := resource.(*appsv1.DaemonSet)
	_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource interface{}) error {
	statefulSet := resource.(appsv1.StatefulSet)
	_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), &statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.Object) error {
	statefulSet := resource.(*appsv1.StatefulSet)
	_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
	deploymentConfig := resource.(openshiftv1.DeploymentConfig)
	_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), &deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource runtime.Object) error {
	deploymentConfig := resource.(*openshiftv1.DeploymentConfig)
	_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource interface{}) error {
	rollout := resource.(argorolloutv1alpha1.Rollout)
func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error {
	rollout := resource.(*argorolloutv1alpha1.Rollout)
	rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
	logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
	logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
	_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), &rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// GetDeploymentVolumes returns the Volumes of given deployment
func GetDeploymentVolumes(item interface{}) []v1.Volume {
	return item.(appsv1.Deployment).Spec.Template.Spec.Volumes
func GetDeploymentVolumes(item runtime.Object) []v1.Volume {
	return item.(*appsv1.Deployment).Spec.Template.Spec.Volumes
}

// GetDaemonSetVolumes returns the Volumes of given daemonSet
func GetDaemonSetVolumes(item interface{}) []v1.Volume {
	return item.(appsv1.DaemonSet).Spec.Template.Spec.Volumes
func GetDaemonSetVolumes(item runtime.Object) []v1.Volume {
	return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes
}

// GetStatefulSetVolumes returns the Volumes of given statefulSet
func GetStatefulSetVolumes(item interface{}) []v1.Volume {
	return item.(appsv1.StatefulSet).Spec.Template.Spec.Volumes
func GetStatefulSetVolumes(item runtime.Object) []v1.Volume {
	return item.(*appsv1.StatefulSet).Spec.Template.Spec.Volumes
}

// GetDeploymentConfigVolumes returns the Volumes of given deploymentConfig
func GetDeploymentConfigVolumes(item interface{}) []v1.Volume {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
func GetDeploymentConfigVolumes(item runtime.Object) []v1.Volume {
	return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
}

// GetRolloutVolumes returns the Volumes of given rollout
func GetRolloutVolumes(item interface{}) []v1.Volume {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
func GetRolloutVolumes(item runtime.Object) []v1.Volume {
	return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
}
@@ -1,9 +1,15 @@
package cmd

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"os"
	"strings"

	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/leadership"

	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
@@ -18,9 +24,10 @@ import (
// NewReloaderCommand starts the reloader controller
func NewReloaderCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "reloader",
		Short: "A watcher for your Kubernetes cluster",
		Run:   startReloader,
		Use:     "reloader",
		Short:   "A watcher for your Kubernetes cluster",
		PreRunE: validateFlags,
		Run:     startReloader,
	}

	// options
@@ -32,10 +39,41 @@ func NewReloaderCommand() *cobra.Command {
	cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON")
	cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
	cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
	cmd.PersistentFlags().StringSlice("namespace-selector", []string{}, "list of key:vaule namespace labels to include")
	cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
	cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
	cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
	cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
	cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts")

	return cmd
}

func validateFlags(*cobra.Command, []string) error {
	// Ensure the reload strategy is one of the following...
	var validReloadStrategy bool
	valid := []string{constants.EnvVarsReloadStrategy, constants.AnnotationsReloadStrategy}
	for _, s := range valid {
		if s == options.ReloadStrategy {
			validReloadStrategy = true
		}
	}

	if !validReloadStrategy {
		err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
		return errors.New(err)
	}

	// Validate that HA options are correct
	if options.EnableHA {
		if err := validateHAEnvs(); err != nil {
			return err
		}
	}

	return nil
}

func configureLogging(logFormat string) error {
	switch logFormat {
	case "json":
@@ -49,6 +87,25 @@ func configureLogging(logFormat string) error {
	return nil
}

func validateHAEnvs() error {
	podName, podNamespace := getHAEnvs()

	if podName == "" {
		return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNameEnv, constants.PodNameEnv)
	}
	if podNamespace == "" {
		return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNamespaceEnv, constants.PodNamespaceEnv)
	}
	return nil
}

func getHAEnvs() (string, string) {
	podName := os.Getenv(constants.PodNameEnv)
	podNamespace := os.Getenv(constants.PodNamespaceEnv)

	return podName, podNamespace
}

func startReloader(cmd *cobra.Command, args []string) {
	err := configureLogging(options.LogFormat)
	if err != nil {
@@ -78,18 +135,34 @@ func startReloader(cmd *cobra.Command, args []string) {
		logrus.Fatal(err)
	}

	namespaceLabelSelector, err := getNamespaceLabelSelector(cmd)
	if err != nil {
		logrus.Fatal(err)
	}

	if len(namespaceLabelSelector) > 0 {
		logrus.Warnf("namespace-selector is set, will detect changes in namespaces with these labels: %s.", namespaceLabelSelector)
	}

	collectors := metrics.SetupPrometheusEndpoint()

	var controllers []*controller.Controller
	for k := range kube.ResourceMap {
		if ignoredResourcesList.Contains(k) {
			continue
		}

		c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, collectors)
		c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, namespaceLabelSelector, collectors)
		if err != nil {
			logrus.Fatalf("%s", err)
		}

		controllers = append(controllers, c)

		// If HA is enabled we only run the controller when
		if options.EnableHA {
			continue
		}
		// Now let's start the controller
		stop := make(chan struct{})
		defer close(stop)
@@ -97,14 +170,38 @@ func startReloader(cmd *cobra.Command, args []string) {
		go c.Run(1, stop)
	}

	// Wait forever
	select {}
	// Run leadership election
	if options.EnableHA {
		podName, podNamespace := getHAEnvs()
		lock := leadership.GetNewLock(clientset.CoordinationV1(), constants.LockName, podName, podNamespace)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
	}

	leadership.SetupLivenessEndpoint()
	logrus.Fatal(http.ListenAndServe(constants.DefaultHttpListenAddr, nil))
}

func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
	return getStringSliceFromFlags(cmd, "namespaces-to-ignore")
}

func getNamespaceLabelSelector(cmd *cobra.Command) (util.Map, error) {
	slice, err := getStringSliceFromFlags(cmd, "namespace-selector")
	if err != nil {
		logrus.Fatal(err)
	}

	var namespaceSelectorMap util.Map = make(util.Map)
	for _, kv := range slice {
		split := strings.Split(kv, ":")
		namespaceSelectorMap[split[0]] = split[1]
	}

	return namespaceSelectorMap, nil
}

func getStringSliceFromFlags(cmd *cobra.Command, flag string) ([]string, error) {
	slice, err := cmd.Flags().GetStringSlice(flag)
	if err != nil {
@@ -1,10 +1,32 @@
package constants

const (
	// DefaultHttpListenAddr is the default listening address for global http server
	DefaultHttpListenAddr = ":9090"

	// ConfigmapEnvVarPostfix is a postfix for configmap envVar
	ConfigmapEnvVarPostfix = "CONFIGMAP"
	// SecretEnvVarPostfix is a postfix for secret envVar
	SecretEnvVarPostfix = "SECRET"
	// EnvVarPrefix is a Prefix for environment variable
	EnvVarPrefix = "STAKATER_"

	// ReloaderAnnotationPrefix is a Prefix for all reloader annotations
	ReloaderAnnotationPrefix = "reloader.stakater.com"
	// LastReloadedFromAnnotation is an annotation used to describe the last resource that triggered a reload
	LastReloadedFromAnnotation = "last-reloaded-from"

	// ReloadStrategyFlag The reload strategy flag name
	ReloadStrategyFlag = "reload-strategy"
	// EnvVarsReloadStrategy instructs Reloader to add container environment variables to facilitate a restart
	EnvVarsReloadStrategy = "env-vars"
	// AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart
	AnnotationsReloadStrategy = "annotations"
)

// Leadership election related consts
const (
	LockName        string = "stakater-reloader-lock"
	PodNameEnv      string = "POD_NAME"
	PodNamespaceEnv string = "POD_NAMESPACE"
)
@@ -1,22 +1,27 @@
 package controller
 
 import (
+	"context"
+	"fmt"
 	"time"
 
 	"github.com/sirupsen/logrus"
 	"github.com/stakater/Reloader/internal/pkg/handler"
 	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/options"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	"github.com/stakater/Reloader/pkg/kube"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
+	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 
-	v1 "k8s.io/api/core/v1"
 	"k8s.io/kubectl/pkg/scheme"
 )
 
 // Controller for checking events
@@ -26,19 +31,38 @@ type Controller struct {
 	queue             workqueue.RateLimitingInterface
 	informer          cache.Controller
 	namespace         string
 	resource          string
 	ignoredNamespaces util.List
 	collectors        metrics.Collectors
+	recorder          record.EventRecorder
+	namespaceSelector map[string]string
 }
 
+// The controllerInitialized flags determine whether each controller has been initialized
+var secretControllerInitialized bool = false
+var configmapControllerInitialized bool = false
+
 // NewController for initializing a Controller
 func NewController(
-	client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {
+	client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, namespaceLabelSelector map[string]string, collectors metrics.Collectors) (*Controller, error) {
+
+	if options.SyncAfterRestart {
+		secretControllerInitialized = true
+		configmapControllerInitialized = true
+	}
 
 	c := Controller{
 		client:            client,
 		namespace:         namespace,
 		ignoredNamespaces: ignoredNamespaces,
+		namespaceSelector: namespaceLabelSelector,
 		resource:          resource,
 	}
+	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
+		Interface: client.CoreV1().Events(""),
+	})
+	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("reloader-%s", resource)})
 
 	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
 	listWatcher := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, fields.Everything())
@@ -52,13 +76,23 @@ func NewController(
 	c.informer = informer
 	c.queue = queue
 	c.collectors = collectors
+	c.recorder = recorder
 
 	logrus.Infof("created controller for: %s", resource)
 	return &c, nil
 }
 
 // Add function to add a new object to the queue in case of creating a resource
 func (c *Controller) Add(obj interface{}) {
 	// Not required as Reloader should update the resource in the event of any change, not in the event of resource creation.
 	// Otherwise Reloader would reload pods whenever Reloader itself restarts, since its queue fills with all the k8s objects as new resources.
+	if options.ReloadOnCreate == "true" {
+		if !c.resourceInIgnoredNamespace(obj) && c.resourceInNamespaceSelector(obj) && secretControllerInitialized && configmapControllerInitialized {
+			c.queue.Add(handler.ResourceCreatedHandler{
+				Resource:   obj,
+				Collectors: c.collectors,
+				Recorder:   c.recorder,
+			})
+		}
+	}
 }
 
 func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
@@ -71,13 +105,50 @@ func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
 	return false
 }
 
+func (c *Controller) resourceInNamespaceSelector(raw interface{}) bool {
+	if len(c.namespaceSelector) == 0 {
+		return true
+	}
+
+	switch object := raw.(type) {
+	case *v1.ConfigMap:
+		return c.matchLabels(object.ObjectMeta.Namespace)
+	case *v1.Secret:
+		return c.matchLabels(object.ObjectMeta.Namespace)
+	}
+	return true
+}
+
+func (c *Controller) matchLabels(resourceNamespace string) bool {
+	namespace, err := c.client.CoreV1().Namespaces().Get(context.Background(), resourceNamespace, metav1.GetOptions{})
+	if err != nil {
+		logrus.Warn(err)
+		return false
+	}
+
+	for selectorKey, selectorVal := range c.namespaceSelector {
+		namespaceLabelVal, namespaceLabelKeyExists := namespace.ObjectMeta.Labels[selectorKey]
+
+		if namespaceLabelKeyExists && selectorVal == "*" {
+			continue
+		}
+
+		if !namespaceLabelKeyExists || selectorVal != namespaceLabelVal {
+			return false
+		}
+	}
+	return true
+}
+
 // Update function to add an old object and a new object to the queue in case of updating a resource
 func (c *Controller) Update(old interface{}, new interface{}) {
-	if !c.resourceInIgnoredNamespace(new) {
+	if !c.resourceInIgnoredNamespace(new) && c.resourceInNamespaceSelector(new) {
 		c.queue.Add(handler.ResourceUpdatedHandler{
 			Resource:    new,
 			OldResource: old,
 			Collectors:  c.collectors,
+			Recorder:    c.recorder,
 		})
 	}
 }
@@ -87,7 +158,7 @@ func (c *Controller) Delete(old interface{}) {
 	// Todo: Any future delete event can be handled here
 }
 
-//Run function for controller which handles the queue
+// Run function for controller which handles the queue
 func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
 	defer runtime.HandleCrash()
 
@@ -111,6 +182,13 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
 }
 
 func (c *Controller) runWorker() {
+	// At this point the controller is fully initialized and we can start processing the resources
+	if c.resource == "secrets" {
+		secretControllerInitialized = true
+	} else if c.resource == "configMaps" {
+		configmapControllerInitialized = true
+	}
+
 	for c.processNextItem() {
 	}
 }
@@ -156,5 +234,6 @@ func (c *Controller) handleErr(err error, key interface{}) {
 	c.queue.Forget(key)
 	// Report to an external entity that, even after several retries, we could not successfully process this key
 	runtime.HandleError(err)
-	logrus.Infof("Dropping the key %q out of the queue: %v", key, err)
+	logrus.Errorf("Dropping key out of the queue: %v", err)
+	logrus.Debugf("Dropping the key %q out of the queue: %v", key, err)
 }
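
The `namespace-selector` flag shown earlier is parsed into `key:value` pairs, and `matchLabels` then requires every selector key to exist on the namespace, with `*` accepting any value. A standalone sketch of the same matching rule, using hypothetical labels for illustration:

```go
package main

import "fmt"

// matchSelector mirrors the matchLabels rule above: every selector key
// must exist on the namespace, and its value must match exactly unless
// the selector value is the wildcard "*".
func matchSelector(selector, nsLabels map[string]string) bool {
	for key, want := range selector {
		got, exists := nsLabels[key]
		if exists && want == "*" {
			continue
		}
		if !exists || want != got {
			return false
		}
	}
	return true
}

func main() {
	// Hypothetical selector, e.g. --namespace-selector=env:prod,team:*
	selector := map[string]string{"env": "prod", "team": "*"}

	fmt.Println(matchSelector(selector, map[string]string{"env": "prod", "team": "payments"})) // true
	fmt.Println(matchSelector(selector, map[string]string{"env": "dev", "team": "payments"}))  // false: env mismatch
	fmt.Println(matchSelector(selector, map[string]string{"env": "prod"}))                     // false: team label missing
}
```

Note that a namespace fails the check as soon as any selector key is missing, so selectors compose as a logical AND.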
(File diff suppressed because it is too large)
@@ -5,12 +5,14 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/tools/record"
 )
 
 // ResourceCreatedHandler contains new objects
 type ResourceCreatedHandler struct {
 	Resource   interface{}
 	Collectors metrics.Collectors
+	Recorder   record.EventRecorder
 }
 
 // Handle processes the newly created resource
@@ -20,7 +22,7 @@ func (r ResourceCreatedHandler) Handle() error {
 	} else {
 		config, _ := r.GetConfig()
 		// process resource based on its type
-		return doRollingUpgrade(config, r.Collectors)
+		return doRollingUpgrade(config, r.Collectors, r.Recorder)
 	}
 	return nil
 }
@@ -5,6 +5,7 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/tools/record"
 )
 
 // ResourceUpdatedHandler contains updated objects
@@ -12,6 +13,7 @@ type ResourceUpdatedHandler struct {
 	Resource    interface{}
 	OldResource interface{}
 	Collectors  metrics.Collectors
+	Recorder    record.EventRecorder
 }
 
 // Handle processes the updated resource
@@ -22,7 +24,7 @@ func (r ResourceUpdatedHandler) Handle() error {
 		config, oldSHAData := r.GetConfig()
 		if config.SHAValue != oldSHAData {
 			// process resource based on its type
-			return doRollingUpgrade(config, r.Collectors)
+			return doRollingUpgrade(config, r.Collectors, r.Recorder)
 		}
 	}
 	return nil
@@ -1,11 +1,17 @@
 package handler
 
 import (
+	"encoding/json"
+	"errors"
 	"fmt"
+	"os"
+	"regexp"
 	"strconv"
 	"strings"
 
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/sirupsen/logrus"
+	alert "github.com/stakater/Reloader/internal/pkg/alerts"
 	"github.com/stakater/Reloader/internal/pkg/callbacks"
 	"github.com/stakater/Reloader/internal/pkg/constants"
 	"github.com/stakater/Reloader/internal/pkg/metrics"
@@ -13,6 +19,9 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/util"
 	"github.com/stakater/Reloader/pkg/kube"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/tools/record"
 )
 
 // GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
@@ -85,31 +94,31 @@ func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
 	}
 }
 
-func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
+func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder) error {
 	clients := kube.GetClients()
 
-	err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
+	err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder)
 	if err != nil {
 		return err
 	}
-	err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
+	err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder)
 	if err != nil {
 		return err
 	}
-	err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
+	err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors, recorder)
 	if err != nil {
 		return err
 	}
 
 	if kube.IsOpenshift {
-		err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
+		err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors, recorder)
 		if err != nil {
 			return err
 		}
 	}
 
 	if options.IsArgoRollouts == "true" {
-		err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
+		err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder)
 		if err != nil {
 			return err
 		}
@@ -118,9 +127,9 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
 	return nil
 }
 
-func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
+func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder) error {
 
-	err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
+	err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors, recorder)
 	if err != nil {
 		logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
 	}
@@ -128,7 +137,7 @@ func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callb
 }
 
 // PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
-func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
+func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder) error {
 	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
 
 	for _, i := range items {
@@ -146,15 +155,16 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
 		result := constants.NotUpdated
 		reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
 		if err == nil && reloaderEnabled {
-			result = updateContainers(upgradeFuncs, i, config, true)
+			result = invokeReloadStrategy(upgradeFuncs, i, config, true)
 		}
 
 		if result != constants.Updated && annotationValue != "" {
 			values := strings.Split(annotationValue, ",")
 			for _, value := range values {
-				value = strings.Trim(value, " ")
-				if value == config.ResourceName {
-					result = updateContainers(upgradeFuncs, i, config, false)
+				value = strings.TrimSpace(value)
+				re := regexp.MustCompile("^" + value + "$")
+				if re.Match([]byte(config.ResourceName)) {
+					result = invokeReloadStrategy(upgradeFuncs, i, config, false)
 					if result == constants.Updated {
 						break
 					}
@@ -165,21 +175,40 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
 		if result != constants.Updated && searchAnnotationValue == "true" {
 			matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
 			if matchAnnotationValue == "true" {
-				result = updateContainers(upgradeFuncs, i, config, true)
+				result = invokeReloadStrategy(upgradeFuncs, i, config, true)
 			}
 		}
 
 		if result == constants.Updated {
-			err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
-			resourceName := util.ToObjectMeta(i).Name
-			if err != nil {
-				logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
-				collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
-				return err
-			} else {
-				logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
-				logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
-				collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
-			}
+			accessor, err := meta.Accessor(i)
+			if err != nil {
+				return err
+			}
+			resourceName := accessor.GetName()
+			err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
+			if err != nil {
+				message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
+				logrus.Errorf(message)
+				collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
+				if recorder != nil {
+					recorder.Event(i, v1.EventTypeWarning, "ReloadFail", message)
+				}
+				return err
+			} else {
+				message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
+				message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
+				logrus.Infof(message)
+				collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
+				alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
+				if recorder != nil {
+					recorder.Event(i, v1.EventTypeNormal, "Reloaded", message)
+				}
+				if ok && alert_on_reload == "true" {
+					msg := fmt.Sprintf(
+						"Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
+						config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
+					alert.SendWebhookAlert(msg)
+				}
 			}
 		}
 	}
@@ -257,7 +286,7 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
 	return nil
 }
 
-func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) *v1.Container {
+func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) *v1.Container {
 	volumes := upgradeFuncs.VolumesFunc(item)
 	containers := upgradeFuncs.ContainersFunc(item)
 	initContainers := upgradeFuncs.InitContainersFunc(item)
@@ -296,10 +325,70 @@ func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter
 	return container
 }
 
-func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
+func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
+	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
+		return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
+	}
+
+	return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
+}
+
+func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
+	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
+	if container == nil {
+		return constants.NoContainerFound
+	}
+
+	// Generate reloaded annotations. Attaching these to the item's pod template will trigger a rollout
+	// Note: the data on this struct is purely informational and is not used for future updates
+	reloadSource := util.NewReloadSourceFromConfig(config, []string{container.Name})
+	annotations, err := createReloadedAnnotations(&reloadSource)
+	if err != nil {
+		logrus.Errorf("Failed to create reloaded annotations for %s! error = %v", config.ResourceName, err)
+		return constants.NotUpdated
+	}
+
+	// Copy all the annotations to the item's pod annotations
+	pa := upgradeFuncs.PodAnnotationsFunc(item)
+	if pa == nil {
+		return constants.NotUpdated
+	}
+
+	for k, v := range annotations {
+		pa[k] = v
+	}
+
+	return constants.Updated
+}
+
+func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, error) {
+	if target == nil {
+		return nil, errors.New("target is required")
+	}
+
+	// Create a single "last-reloaded-from" annotation that stores metadata about the
+	// resource that caused the last reload.
+	// Intentionally only storing the last item in order to keep
+	// the generated annotations as small as possible.
+	annotations := make(map[string]string)
+	lastReloadedResourceName := fmt.Sprintf("%s/%s",
+		constants.ReloaderAnnotationPrefix,
+		constants.LastReloadedFromAnnotation,
+	)
+
+	lastReloadedResource, err := json.Marshal(target)
+	if err != nil {
+		return nil, err
+	}
+
+	annotations[lastReloadedResourceName] = string(lastReloadedResource)
+	return annotations, nil
+}
+
+func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
 	var result constants.Result
 	envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
-	container := getContainerToUpdate(upgradeFuncs, item, config, autoReload)
+	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
 
 	if container == nil {
 		return constants.NoContainerFound
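
One behavioral change above is easy to miss: annotation values are now compiled as anchored regular expressions (`"^" + value + "$"`) instead of compared for equality, so exact names still match but patterns become possible (and, as a side effect, an invalid pattern would panic via `MustCompile`). A small standalone sketch of the new matching rule, with illustrative names only:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// annotationMatches mirrors the new loop in PerformRollingUpgrade: each
// comma-separated annotation value is trimmed and compiled as an anchored
// regular expression, then tested against the changed resource's name.
func annotationMatches(annotationValue, resourceName string) bool {
	for _, value := range strings.Split(annotationValue, ",") {
		value = strings.TrimSpace(value)
		re := regexp.MustCompile("^" + value + "$")
		if re.MatchString(resourceName) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(annotationMatches("my-config", "my-config"))       // true: exact names still work
	fmt.Println(annotationMatches("my-config-.*", "my-config-v2")) // true: regex pattern
	fmt.Println(annotationMatches("my-config", "my-config-v2"))    // false: anchored, no partial match
}
```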
(File diff suppressed because it is too large)

internal/pkg/leadership/leadership.go (new file, 107 lines)
@@ -0,0 +1,107 @@
package leadership

import (
	"context"
	"net/http"
	"sync"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/controller"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"

	coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
)

var (
	// Used for liveness probe
	m       sync.Mutex
	healthy bool = true
)

func GetNewLock(client coordinationv1.CoordinationV1Interface, lockName, podname, namespace string) *resourcelock.LeaseLock {
	return &resourcelock.LeaseLock{
		LeaseMeta: v1.ObjectMeta{
			Name:      lockName,
			Namespace: namespace,
		},
		Client: client,
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: podname,
		},
	}
}

// RunLeaderElection runs leadership election. If an instance of the controller is the leader and stops leading, it will shut down.
func RunLeaderElection(lock *resourcelock.LeaseLock, ctx context.Context, cancel context.CancelFunc, id string, controllers []*controller.Controller) {
	// Construct channels for the controllers to use
	var stopChannels []chan struct{}
	for i := 0; i < len(controllers); i++ {
		stop := make(chan struct{})
		stopChannels = append(stopChannels, stop)
	}

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		ReleaseOnCancel: true,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(c context.Context) {
				logrus.Info("became leader, starting controllers")
				runControllers(controllers, stopChannels)
			},
			OnStoppedLeading: func() {
				logrus.Info("no longer leader, shutting down")
				stopControllers(stopChannels)
				cancel()
				m.Lock()
				defer m.Unlock()
				healthy = false
			},
			OnNewLeader: func(current_id string) {
				if current_id == id {
					logrus.Info("still the leader!")
					return
				}
				logrus.Infof("new leader is %s", current_id)
			},
		},
	})
}

func runControllers(controllers []*controller.Controller, stopChannels []chan struct{}) {
	for i, c := range controllers {
		c := c
		go c.Run(1, stopChannels[i])
	}
}

func stopControllers(stopChannels []chan struct{}) {
	for _, c := range stopChannels {
		close(c)
	}
}

// SetupLivenessEndpoint registers the liveness probe endpoint. If leadership election is
// enabled and a replica stops leading, the liveness probe will fail and the
// kubelet will restart the container.
func SetupLivenessEndpoint() {
	http.HandleFunc("/live", healthz)
}

func healthz(w http.ResponseWriter, req *http.Request) {
	m.Lock()
	defer m.Unlock()
	if healthy {
		if i, err := w.Write([]byte("alive")); err != nil {
			logrus.Infof("failed to write liveness response, wrote: %d bytes, got err: %s", i, err)
		}
		return
	}

	w.WriteHeader(http.StatusInternalServerError)
}
internal/pkg/leadership/leadership_test.go (new file, 213 lines)
@@ -0,0 +1,213 @@
package leadership

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	"testing"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/controller"
	"github.com/stakater/Reloader/internal/pkg/handler"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/testutil"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
)

func TestMain(m *testing.M) {
	testutil.CreateNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)

	logrus.Infof("Running Testcases")
	retCode := m.Run()

	testutil.DeleteNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)

	os.Exit(retCode)
}

func TestHealthz(t *testing.T) {
	request, err := http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response := httptest.NewRecorder()

	healthz(response, request)
	got := response.Code
	want := 200

	if got != want {
		t.Fatalf("got: %q, want: %q", got, want)
	}

	// Have the liveness probe serve a 500
	healthy = false

	request, err = http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response = httptest.NewRecorder()

	healthz(response, request)
	got = response.Code
	want = 500

	if got != want {
		t.Fatalf("got: %q, want: %q", got, want)
	}
}

// TestRunLeaderElection validates that the liveness endpoint serves 500 when
// leadership election fails
func TestRunLeaderElection(t *testing.T) {
	ctx, cancel := context.WithCancel(context.TODO())

	lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), constants.LockName, testutil.Pod, testutil.Namespace)

	go RunLeaderElection(lock, ctx, cancel, testutil.Pod, []*controller.Controller{})

	// The liveness probe is still expected to serve 500 here: `healthy` was
	// flipped to false by TestHealthz and has not been reset
	request, err := http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response := httptest.NewRecorder()

	healthz(response, request)
	got := response.Code
	want := 500

	if got != want {
		t.Fatalf("got: %q, want: %q", got, want)
	}

	// Cancel the leader election context, so leadership is released and the
	// live endpoint serves 500
	cancel()

	request, err = http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response = httptest.NewRecorder()

	healthz(response, request)
	got = response.Code
	want = 500

	if got != want {
		t.Fatalf("got: %q, want: %q", got, want)
	}
}

// TestRunLeaderElectionWithControllers tests that leadership election works
// with real controllers and that on context cancellation the controllers stop
// running.
func TestRunLeaderElectionWithControllers(t *testing.T) {
	t.Logf("Creating controller")
	var controllers []*controller.Controller
	for k := range kube.ResourceMap {
		c, err := controller.NewController(testutil.Clients.KubernetesClient, k, testutil.Namespace, []string{}, map[string]string{}, metrics.NewCollectors())
		if err != nil {
			logrus.Fatalf("%s", err)
		}

		controllers = append(controllers, c)
	}
	time.Sleep(3 * time.Second)

	lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), fmt.Sprintf("%s-%d", constants.LockName, 1), testutil.Pod, testutil.Namespace)

	ctx, cancel := context.WithCancel(context.TODO())

	// Start running leadership election; this also starts the controllers
	go RunLeaderElection(lock, ctx, cancel, testutil.Pod, controllers)
	time.Sleep(3 * time.Second)

	// Create a configmap and a deployment that references it
	configmapName := testutil.ConfigmapNamePrefix + "-update-" + testutil.RandSeq(5)
	configmapClient, err := testutil.CreateConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName, "www.google.com")
	if err != nil {
		t.Fatalf("Error while creating the configmap %v", err)
	}

	// Creating deployment
	_, err = testutil.CreateDeployment(testutil.Clients.KubernetesClient, configmapName, testutil.Namespace, true)
	if err != nil {
		t.Fatalf("Error in deployment creation: %v", err)
	}

	// Updating configmap for the first time
	updateErr := testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com")
	if updateErr != nil {
		t.Fatalf("Configmap was not updated")
	}
	time.Sleep(3 * time.Second)

	// Verifying deployment update
	logrus.Infof("Verifying pod envvars have been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
	config := util.Config{
		Namespace:    testutil.Namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
		Annotation:   options.ConfigmapUpdateOnChangeAnnotation,
	}
	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
	updated := testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Fatalf("Deployment was not updated")
	}
	time.Sleep(testutil.SleepDuration)

	// Cancel the leader election context, so leadership is released
	logrus.Info("shutting down controller from test")
	cancel()
	time.Sleep(5 * time.Second)

	// Updating configmap again
	updateErr = testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com/new")
	if updateErr != nil {
		t.Fatalf("Configmap was not updated")
	}

	// Verifying that the deployment was not updated, as leadership has been lost
	logrus.Infof("Verifying pod envvars have not been updated")
	shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
	config = util.Config{
		Namespace:    testutil.Namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
		Annotation:   options.ConfigmapUpdateOnChangeAnnotation,
	}
	deploymentFuncs = handler.GetDeploymentRollingUpgradeFuncs()
	updated = testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if updated {
		t.Fatalf("Deployment was updated")
	}

	// Deleting deployment
	err = testutil.DeleteDeployment(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
	if err != nil {
		logrus.Errorf("Error while deleting the deployment %v", err)
	}

	// Deleting configmap
	err = testutil.DeleteConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(testutil.SleepDuration)
}
@@ -3,7 +3,6 @@ package metrics
 import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
-	"github.com/sirupsen/logrus"
 	"net/http"
 )
 
@@ -33,11 +32,7 @@ func NewCollectors() Collectors {
 func SetupPrometheusEndpoint() Collectors {
 	collectors := NewCollectors()
 	prometheus.MustRegister(collectors.Reloaded)
-
-	go func() {
-		http.Handle("/metrics", promhttp.Handler())
-		logrus.Fatal(http.ListenAndServe(":9090", nil))
-	}()
+	http.Handle("/metrics", promhttp.Handler())
 
 	return collectors
 }
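
With this change the metrics package no longer starts its own server; it only registers `/metrics` on the default mux, and the single `http.ListenAndServe` call in `startReloader` (first hunk above) serves both `/metrics` and the `/live` liveness endpoint on `:9090`. A minimal standalone sketch of that consolidation, with a stub liveness handler standing in for the leadership package:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Each package only registers its handler on the default ServeMux...
	http.Handle("/metrics", promhttp.Handler())
	http.HandleFunc("/live", func(w http.ResponseWriter, r *http.Request) {
		// Stub standing in for the leadership package's healthz handler.
		w.Write([]byte("alive"))
	})

	// ...and one global listener serves everything, mirroring
	// constants.DefaultHttpListenAddr.
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```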
@@ -1,5 +1,7 @@
 package options
 
+import "github.com/stakater/Reloader/internal/pkg/constants"
+
 var (
 	// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in
 	// configmaps specified by name
@@ -17,6 +19,13 @@ var (
 	SearchMatchAnnotation = "reloader.stakater.com/match"
 	// LogFormat is the log format to use (json, or empty string for default)
 	LogFormat = ""
-	// Adds support for argo rollouts
+	// IsArgoRollouts adds support for Argo Rollouts
 	IsArgoRollouts = "false"
+	// ReloadStrategy specifies the update strategy
+	ReloadStrategy = constants.EnvVarsReloadStrategy
+	// ReloadOnCreate adds support for watching create events
+	ReloadOnCreate = "false"
+	SyncAfterRestart = false
+	// EnableHA adds support for running multiple replicas via leadership election
+	EnableHA = false
 )
@@ -2,6 +2,8 @@ package testutil
 
 import (
 	"context"
+	"encoding/json"
+	"fmt"
 	"math/rand"
 	"sort"
 	"strconv"
@@ -14,11 +16,13 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/callbacks"
 	"github.com/stakater/Reloader/internal/pkg/constants"
 	"github.com/stakater/Reloader/internal/pkg/crypto"
+	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/options"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	"github.com/stakater/Reloader/pkg/kube"
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	core_v1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -32,6 +36,19 @@ var (
 	SecretResourceType = "secrets"
 )
 
+var (
+	Clients             = kube.GetClients()
+	Pod                 = "test-reloader-" + RandSeq(5)
+	Namespace           = "test-reloader-" + RandSeq(5)
+	ConfigmapNamePrefix = "testconfigmap-reloader"
+	SecretNamePrefix    = "testsecret-reloader"
+	Data                = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
+	NewData             = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
+	UpdatedData         = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
+	Collectors          = metrics.NewCollectors()
+	SleepDuration       = 3 * time.Second
+)
+
 // CreateNamespace creates namespace for testing
 func CreateNamespace(namespace string, client kubernetes.Interface) {
 	_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
@@ -563,8 +580,8 @@ func GetSecretWithUpdatedLabel(namespace string, secretName string, label string
 	}
 }
 
-// GetResourceSHA returns the SHA value of given environment variable
-func GetResourceSHA(containers []v1.Container, envVar string) string {
+// GetResourceSHAFromEnvVar returns the SHA value of the given environment variable
+func GetResourceSHAFromEnvVar(containers []v1.Container, envVar string) string {
 	for i := range containers {
 		envs := containers[i].Env
 		for j := range envs {
@@ -576,7 +593,29 @@ func GetResourceSHA(containers []v1.Container, envVar string) string {
 	return ""
 }
 
-//ConvertResourceToSHA generates SHA from secret or configmap data
+// GetResourceSHAFromAnnotation returns the SHA value stored in the last-reloaded-from annotation
+func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
+	lastReloadedResourceName := fmt.Sprintf("%s/%s",
+		constants.ReloaderAnnotationPrefix,
+		constants.LastReloadedFromAnnotation,
+	)
+
+	annotationJson, ok := podAnnotations[lastReloadedResourceName]
+	if !ok {
+		return ""
+	}
+
+	var last util.ReloadSource
+	bytes := []byte(annotationJson)
+	err := json.Unmarshal(bytes, &last)
+	if err != nil {
+		return ""
+	}
+
+	return last.Hash
+}
+
+// ConvertResourceToSHA generates SHA from secret or configmap data
 func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
 	values := []string{}
 	if resourceType == SecretResourceType {
@@ -798,7 +837,6 @@ func DeleteSecret(client kubernetes.Interface, namespace string, secretName stri
 
 // RandSeq generates a random sequence
 func RandSeq(n int) string {
-	rand.Seed(time.Now().UnixNano())
 	b := make([]rune, n)
 	for i := range b {
 		b[i] = letters[rand.Intn(len(letters))]
@@ -806,15 +844,20 @@ func RandSeq(n int) string {
 	return string(b)
 }
 
-// VerifyResourceUpdate verifies whether the rolling upgrade happened or not
-func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not
+func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
 	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
 	for _, i := range items {
 		containers := upgradeFuncs.ContainersFunc(i)
+		accessor, err := meta.Accessor(i)
+		if err != nil {
+			return false
+		}
+		annotations := accessor.GetAnnotations()
 		// match statefulsets with the correct annotation
-		annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
-		searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
-		reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
+		annotationValue := annotations[config.Annotation]
+		searchAnnotationValue := annotations[options.AutoSearchAnnotation]
+		reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
 		reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
 		matches := false
 		if err == nil && reloaderEnabled {
@@ -836,7 +879,50 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi
 
 		if matches {
 			envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix
-			updated := GetResourceSHA(containers, envName)
+			updated := GetResourceSHAFromEnvVar(containers, envName)
 			if updated == config.SHAValue {
 				return true
 			}
 		}
 	}
 	return false
 }
 
+// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
+func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
+	for _, i := range items {
+		podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)
+		accessor, err := meta.Accessor(i)
+		if err != nil {
+			return false
+		}
+		annotations := accessor.GetAnnotations()
+		// match statefulsets with the correct annotation
+		annotationValue := annotations[config.Annotation]
+		searchAnnotationValue := annotations[options.AutoSearchAnnotation]
+		reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
+		reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
+		matches := false
+		if err == nil && reloaderEnabled {
+			matches = true
+		} else if annotationValue != "" {
+			values := strings.Split(annotationValue, ",")
+			for _, value := range values {
+				value = strings.Trim(value, " ")
+				if value == config.ResourceName {
+					matches = true
+					break
+				}
+			}
+		} else if searchAnnotationValue == "true" {
+			if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
+				matches = true
+			}
+		}
+
+		if matches {
+			updated := GetResourceSHAFromAnnotation(podAnnotations)
+			if updated == config.SHAValue {
+				return true
+			}
internal/pkg/util/reload_source.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package util

import "time"

type ReloadSource struct {
	Type          string   `json:"type"`
	Name          string   `json:"name"`
	Namespace     string   `json:"namespace"`
	Hash          string   `json:"hash"`
	ContainerRefs []string `json:"containerRefs"`
	ObservedAt    int64    `json:"observedAt"`
}

func NewReloadSource(
	resourceName string,
	resourceNamespace string,
	resourceType string,
	resourceHash string,
	containerRefs []string,
) ReloadSource {
	return ReloadSource{
		ObservedAt:    time.Now().Unix(),
		Name:          resourceName,
		Namespace:     resourceNamespace,
		Type:          resourceType,
		Hash:          resourceHash,
		ContainerRefs: containerRefs,
	}
}

func NewReloadSourceFromConfig(config Config, containerRefs []string) ReloadSource {
	return NewReloadSource(
		config.ResourceName,
		config.Namespace,
		config.Type,
		config.SHAValue,
		containerRefs,
	)
}
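
For the annotations reload strategy, the value written under `reloader.stakater.com/last-reloaded-from` is simply the JSON encoding of this `ReloadSource` struct. A small sketch showing what a generated annotation value would look like (the hash and names below are made up for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Mirror of the ReloadSource struct defined above.
type ReloadSource struct {
	Type          string   `json:"type"`
	Name          string   `json:"name"`
	Namespace     string   `json:"namespace"`
	Hash          string   `json:"hash"`
	ContainerRefs []string `json:"containerRefs"`
	ObservedAt    int64    `json:"observedAt"`
}

func main() {
	// Hypothetical values for illustration only.
	src := ReloadSource{
		Type:          "CONFIGMAP",
		Name:          "my-config",
		Namespace:     "default",
		Hash:          "0f6d2b2a8c0e",
		ContainerRefs: []string{"app"},
		ObservedAt:    time.Now().Unix(),
	}

	b, _ := json.Marshal(src)
	fmt.Printf("reloader.stakater.com/last-reloaded-from: %s\n", b)
	// e.g. {"type":"CONFIGMAP","name":"my-config","namespace":"default","hash":"0f6d2b2a8c0e","containerRefs":["app"],"observedAt":1700000000}
}
```

Changing the annotation's JSON value on the pod template is what triggers the rollout; as the comment in `createReloadedAnnotations` notes, only the most recent source is stored to keep the annotation small.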
@@ -54,6 +54,8 @@ func GetSHAfromSecret(data map[string][]byte) string {
 
 type List []string
 
+type Map map[string]string
+
 func (l *List) Contains(s string) bool {
 	for _, v := range *l {
 		if v == s {
okteto.yml (31 lines changed)
@@ -1,14 +1,17 @@
-name: reloader-reloader
-image: okteto/golang:1
-command: bash
-securityContext:
-  capabilities:
-    add:
-      - SYS_PTRACE
-volumes:
-  - /go/pkg/
-  - /root/.cache/go-build/
-sync:
-  - .:/app
-forward:
-  - 2345:2345
+dev:
+  reloader-reloader:
+    image: okteto/golang:1
+    command: bash
+    volumes:
+      - /go/pkg/
+      - /root/.cache/go-build/
+    sync:
+      - .:/app
+    forward:
+      - 2345:2345
+    workdir: /app
+    autocreate: true
+    securityContext:
+      capabilities:
+        add:
+          - SYS_PTRACE