Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)

Compare commits (205 commits)
| SHA1 |
|---|
| 5a65cf9f6d |
| a8a68ae1b0 |
| 7643a27fb1 |
| 71fdb53c2e |
| d6312f6f83 |
| 19220f5e6e |
| 05456b0905 |
| 10328dee8d |
| fd174ed691 |
| 2e47f1740c |
| 15cb96f945 |
| 1e987db54d |
| 12a7fed3ae |
| f18fac66c2 |
| b5c95f9cbf |
| 46b948388f |
| 78be58b090 |
| 54a8e0683b |
| 702f0caa93 |
| 2e709e85ae |
| debfd57a91 |
| c3b8af34ac |
| 7a65bcb35b |
| af6cd9e37c |
| 344004d0b3 |
| a5bc586f09 |
| 81ca7ab601 |
| 69c9ccb2ea |
| 0ec3effab8 |
| dba42e91bc |
| 68fd3bebe5 |
| 52b975ef0d |
| 0679af76f4 |
| 309c10f632 |
| 07ddec9fd1 |
| 69a80fd1d9 |
| 04975de060 |
| 459a808371 |
| ef8a335c93 |
| 93a52500d1 |
| ac2dac330e |
| e9843c7c7d |
| 1f154d0572 |
| 7ccb17392e |
| e8da3f48ec |
| 614865a8d7 |
| 4f551ada6e |
| 608a928967 |
| 5a14798341 |
| e7516e82e3 |
| dc3494c041 |
| 79e3588389 |
| 45a833bbb2 |
| 1f22ebe132 |
| 1846b31936 |
| 935a17b1c7 |
| 7b44a472ad |
| a46b56271c |
| 2f9dd7c422 |
| f373686b75 |
| 80557ce43e |
| c4f6d93eb9 |
| c75c787738 |
| ba18bbfd72 |
| 610b4e5716 |
| dc0715de61 |
| 4f6ff420e8 |
| 966d5e61c0 |
| d017747792 |
| 70099fdc8f |
| aaddec1103 |
| b5fdcd577d |
| 8b9bf07631 |
| 674444850d |
| e74dcc3cbd |
| dcae4c98ac |
| 94a83c5974 |
| 592976bf09 |
| ed736c8e20 |
| 84133742b1 |
| 04e19a733b |
| c1ae5efb7b |
| f630336fed |
| fde312edcc |
| 57eb4f4eaa |
| 1490a1feaa |
| 58c622eb91 |
| 2fd8b190b1 |
| 81c840ea30 |
| 21dbeb9810 |
| fba004d655 |
| 631781aa8a |
| 707dccf6b8 |
| 5edd29b8e9 |
| 27815ea3b3 |
| 5fd275a05c |
| b22694d3c2 |
| 5c95c6898b |
| 46bc4b71db |
| cee81b4757 |
| 1cec52637f |
| 1901a4eb49 |
| 710396f66e |
| 11bafa9f36 |
| 9a45318fc9 |
| 843f47600a |
| 3d9dee27b5 |
| 63fd3c2635 |
| 284ca59ca4 |
| 2ce24abe40 |
| 6419444663 |
| 1a6fd3e302 |
| 7ac90b8c88 |
| faf27c2d5d |
| 6a0dfd3ce0 |
| fdbc3067ce |
| c4ead210ee |
| 0441f6d481 |
| 09b9a073a0 |
| d6d188f224 |
| 422c291b06 |
| ed6ea026a8 |
| da30b4744b |
| 503e357349 |
| 61e9202781 |
| 8dbe7a85af |
| e86f616305 |
| 0c36cfd602 |
| f38f86a45c |
| 5033b8fcdc |
| be4285742a |
| 6a008999f5 |
| 93f4ea240f |
| c6fbae2f62 |
| 3fe0ebb48a |
| 67b847bf41 |
| eaa3db48f5 |
| a505d2e3b1 |
| 9ec5515a39 |
| 8db17acf67 |
| b43719cf34 |
| e8216069a5 |
| 732d35e45f |
| dcedaa2cfe |
| 8d77121c3b |
| 013cd92219 |
| 39b5be37af |
| 86c2ed265d |
| 87130f06bc |
| 17f702f510 |
| 16f3055e10 |
| 4800af8e28 |
| db79c65334 |
| d2223f313f |
| c9dabc3a14 |
| e61f9a6bdb |
| 6bcec06052 |
| 0988e8947f |
| ff27cc0f51 |
| be7d454504 |
| 3131116ed6 |
| 965cacf1ba |
| e81b49d81b |
| 17f8b81110 |
| 5980c91560 |
| fda733ea5a |
| 732cd5b53a |
| aae0c5c443 |
| d4223311de |
| 29173c7364 |
| 7767809a38 |
| f0d6a9e646 |
| d1538dbeec |
| 8470962383 |
| 139bc1ca38 |
| 6d9f89a452 |
| 5ba914d6bb |
| 9a5094a4ed |
| 3fe7ad04e9 |
| bf6a247f54 |
| 8203cc3c11 |
| f287d84b6a |
| 205d36512c |
| 0b39353c12 |
| 97a5616e60 |
| b274ac0947 |
| ed29d1d18c |
| 2384d65953 |
| 7b19601423 |
| 76bf43cb13 |
| 1b7bb3bead |
| c844f12f73 |
| 5ac2164a1c |
| c9b89c37c1 |
| 55bc4c3e22 |
| 77c7d63296 |
| 2ae4753efb |
| 68d0349793 |
| ded923b12a |
| 0726999bf9 |
| f89c321a50 |
| 225427cec1 |
| be86e8417f |
| bf961c0456 |
| 3248ca9578 |
.github/workflows/pull_request.yaml (vendored, new file, 136 lines)

```yaml
name: Pull Request

on:
  pull_request_target:
    branches:
      - master

env:
  DOCKER_FILE_PATH: Dockerfile
  GOLANG_VERSION: 1.15.2
  KUBERNETES_VERSION: "1.18.0"
  KIND_VERSION: "0.7.0"

jobs:
  build:
    runs-on: ubuntu-latest
    name: Build
    if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with:
          ref: ${{github.event.pull_request.head.sha}}

      # Setting up helm binary
      - name: Set up Helm
        uses: azure/setup-helm@v1

      - name: Set up Go
        id: go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GOLANG_VERSION }}

      - name: Install Dependencies
        run: |
          make install

      - name: Lint
        run: |
          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
          golangci-lint run --timeout=10m ./...

      - name: Helm Lint
        run: |
          cd deployments/kubernetes/chart/reloader
          helm lint

      - name: Install kubectl
        run: |
          curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
          sudo install ./kubectl /usr/local/bin/ && rm kubectl
          kubectl version --short --client
          kubectl version --short --client | grep -q ${KUBERNETES_VERSION}

      - name: Install Kind
        run: |
          curl -L -o kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64
          sudo install ./kind /usr/local/bin && rm kind
          kind version
          kind version | grep -q ${KIND_VERSION}

      - name: Create Kind Cluster
        run: |
          kind create cluster
          kubectl cluster-info

      - name: Test
        run: make test

      - name: Generate Tag
        id: generate_tag
        run: |
          sha=${{ github.event.pull_request.head.sha }}
          tag="SNAPSHOT-PR-${{ github.event.pull_request.number }}-${sha:0:8}"
          echo "##[set-output name=GIT_TAG;]$(echo ${tag})"

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to Registry
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
          password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}

      - name: Generate image repository path
        run: |
          echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV

      - name: Build and Push Docker Image
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ${{ env.DOCKER_FILE_PATH }}
          pull: true
          push: true
          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
          cache-to: type=inline
          platforms: linux/amd64,linux/arm,linux/arm64
          tags: |
            ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
          labels: |
            org.opencontainers.image.source=${{ github.event.repository.clone_url }}
            org.opencontainers.image.created=${{ steps.prep.outputs.created }}
            org.opencontainers.image.revision=${{ github.sha }}

      - name: Comment on PR
        uses: mshick/add-pr-comment@v1
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
        with:
          message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ github.repository }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
          allow-repeats: false

      - name: Notify Failure
        if: failure()
        uses: mshick/add-pr-comment@v1
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
        with:
          message: '@${{ github.actor }} Yikes! You better fix it before anyone else finds out! [Build](https://github.com/${{ github.repository }}/commit/${{ github.event.pull_request.head.sha }}/checks) has Failed!'
          allow-repeats: false

      - name: Notify Slack
        uses: 8398a7/action-slack@v3
        if: always() # Pick up events even if the job fails or is canceled.
        with:
          status: ${{ job.status }}
          fields: repo,author,action,eventName,ref,workflow
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
```
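The `Generate Tag` step above derives a per-PR snapshot tag from the pull request number and the head SHA. A minimal sketch of the same derivation, runnable locally (the PR number and SHA below are made-up sample values):

```bash
# Reproduce the snapshot tag scheme from the Generate Tag step.
# pr_number and sha are sample values, not taken from a real PR.
pr_number=123
sha=1e987db54d00112233445566778899aabbccddee
tag="SNAPSHOT-PR-${pr_number}-${sha:0:8}"   # first 8 characters of the head SHA
echo "${tag}"                               # -> SNAPSHOT-PR-123-1e987db5
```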
.github/workflows/push.yaml (vendored, new file, 175 lines)

```yaml
name: Push

on:
  push:
    branches:
      - master

env:
  DOCKER_FILE_PATH: Dockerfile
  GOLANG_VERSION: 1.15.2
  KUBERNETES_VERSION: "1.18.0"
  KIND_VERSION: "0.7.0"
  HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"

jobs:
  build:
    name: Build
    if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
    runs-on: ubuntu-latest

    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with:
          persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token
          fetch-depth: 0 # otherwise, you will fail to push refs to dest repo

      # Setting up helm binary
      - name: Set up Helm
        uses: azure/setup-helm@v1

      - name: Set up Go
        id: go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GOLANG_VERSION }}

      - name: Install Dependencies
        run: |
          make install

      - name: Lint
        run: |
          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.26.0
          golangci-lint run --timeout=10m ./...

      - name: Install kubectl
        run: |
          curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
          sudo install ./kubectl /usr/local/bin/ && rm kubectl
          kubectl version --short --client
          kubectl version --short --client | grep -q ${KUBERNETES_VERSION}

      - name: Install Kind
        run: |
          curl -L -o kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64
          sudo install ./kind /usr/local/bin && rm kind
          kind version
          kind version | grep -q ${KIND_VERSION}

      - name: Create Kind Cluster
        run: |
          kind create cluster
          kubectl cluster-info

      - name: Test
        run: make test

      - name: Generate Tag
        id: generate_tag
        uses: anothrNick/github-tag-action@1.26.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: true
          DEFAULT_BUMP: patch
          DRY_RUN: true

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to Registry
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
          password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}

      - name: Generate image repository path
        run: |
          echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV

      - name: Build and push
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ${{ env.DOCKER_FILE_PATH }}
          pull: true
          push: true
          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
          cache-to: type=inline
          platforms: linux/amd64,linux/arm,linux/arm64
          tags: |
            ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.new_tag }}
          labels: |
            org.opencontainers.image.source=${{ github.event.repository.clone_url }}
            org.opencontainers.image.created=${{ steps.prep.outputs.created }}
            org.opencontainers.image.revision=${{ github.sha }}

      ##############################
      ## Add steps to generate required artifacts for a release here (helm chart, operator manifest etc.)
      ##############################

      # Generate tag for operator without "v"
      - name: Generate Operator Tag
        id: generate_operator_tag
        uses: anothrNick/github-tag-action@1.26.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: false
          DEFAULT_BUMP: patch
          DRY_RUN: true

      # Update chart tag to the latest semver tag
      - name: Update Chart Version
        env:
          VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
        run: make bump-chart

      # Publish helm chart
      - name: Publish Helm chart
        uses: stefanprodan/helm-gh-pages@master
        with:
          branch: master
          repository: stakater-charts
          target_dir: docs
          token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          charts_dir: deployments/kubernetes/chart/
          charts_url: ${{ env.HELM_REGISTRY_URL }}
          owner: stakater
          linting: on
          commit_username: stakater-user
          commit_email: stakater@gmail.com

      # Commit back changes
      - name: Commit files
        run: |
          git config --local user.email "stakater@gmail.com"
          git config --local user.name "stakater-user"
          git status
          git add .
          git commit -m "[skip-ci] Update artifacts" -a

      - name: Push changes
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          branch: ${{ github.ref }}

      - name: Push Latest Tag
        uses: anothrNick/github-tag-action@1.26.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: true
          DEFAULT_BUMP: patch

      - name: Notify Slack
        uses: 8398a7/action-slack@v3
        if: always() # Pick up events even if the job fails or is canceled.
        with:
          status: ${{ job.status }}
          fields: repo,author,action,eventName,ref,workflow
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
```
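The `Generate image repository path` step lowercases the GitHub repository slug because Docker Hub repository names must be lowercase. The same transformation in isolation (the slug is an example value):

```bash
# Lowercase an owner/repo slug the way the workflow does before tagging.
repo="stakater/Reloader"
IMAGE_REPOSITORY=$(echo "${repo}" | tr '[:upper:]' '[:lower:]')
echo "${IMAGE_REPOSITORY}"   # -> stakater/reloader
```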
.github/workflows/release.yaml (vendored, new file, 44 lines)

```yaml
name: Release Go project

on:
  push:
    tags:
      - "v*"

env:
  GOLANG_VERSION: 1.15.2

jobs:
  build:
    name: GoReleaser build
    runs-on: ubuntu-latest

    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0 # See: https://goreleaser.com/ci/actions/

      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GOLANG_VERSION }}
        id: go

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@master
        with:
          version: latest
          args: release --rm-dist
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}

      - name: Notify Slack
        uses: 8398a7/action-slack@v3
        if: always()
        with:
          status: ${{ job.status }}
          fields: repo,author,action,eventName,ref,workflow
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
```
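This workflow only runs on tag pushes matching `v*`, so a release is cut by tagging. A sketch of the trigger, assuming a hypothetical next version number:

```bash
# Pushing a v-prefixed tag starts the GoReleaser workflow.
# v0.0.92 is a hypothetical example version, not a real release.
git tag v0.0.92
git push origin v0.0.92
```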
.goreleaser.yml (modified)

```diff
@@ -10,6 +10,8 @@ builds:
     - amd64
     - arm
     - arm64
+archives:
+  - name_template: "{{ .ProjectName }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
 snapshot:
   name_template: "{{ .Tag }}-next"
 checksum:
@@ -21,4 +23,4 @@ changelog:
       - '^docs:'
       - '^test:'
 env_files:
-  github_token: /home/jenkins/.apitoken/hub
+  github_token: /home/jenkins/.apitoken/hub
```
Dockerfile (new file, 34 lines)

```dockerfile
# Build the manager binary
FROM --platform=${BUILDPLATFORM} golang:1.15.2 as builder

ARG TARGETOS
ARG TARGETARCH

WORKDIR /workspace

# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download

# Copy the go source
COPY main.go main.go
COPY internal/ internal/
COPY pkg/ pkg/

# Build
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} GO111MODULE=on go build -mod=mod -a -o manager main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER nonroot:nonroot

# Port for metrics and probes
EXPOSE 9090

ENTRYPOINT ["/manager"]
```
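Because the build stage is pinned to `${BUILDPLATFORM}` and consumes `TARGETOS`/`TARGETARCH`, the image cross-compiles cleanly under buildx. A sketch of a local multi-platform build matching the platforms the workflows pass (the image tag is a placeholder):

```bash
# buildx fills in BUILDPLATFORM, TARGETOS and TARGETARCH per platform;
# "example/reloader:dev" is a placeholder tag.
docker buildx build \
  --platform linux/amd64,linux/arm,linux/arm64 \
  -f Dockerfile \
  -t example/reloader:dev \
  .
```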
Jenkinsfile (vendored, deleted, 7 lines)

```diff
@@ -1,7 +0,0 @@
-#!/usr/bin/groovy
-@Library('github.com/stakater/stakater-pipeline-library@v2.16.10') _
-
-goBuildViaGoReleaser {
-    publicChartRepositoryURL = 'https://stakater.github.io/stakater-charts'
-    publicChartGitURL = 'git@github.com:stakater/stakater-charts.git'
-}
```
Makefile (66 changed lines)

```diff
@@ -1,15 +1,20 @@
 # note: call scripts from /scripts
 
-.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy
+.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy release release-all manifest push clean-image
 
-BUILDER ?= reloader-builder
+OS ?= linux
+ARCH ?= ???
+ALL_ARCH ?= arm64 arm amd64
+
+BUILDER ?= reloader-builder-${ARCH}
 BINARY ?= Reloader
 DOCKER_IMAGE ?= stakater/reloader
 # Default value "dev"
 DOCKER_TAG ?= 1.0.0
-REPOSITORY = ${DOCKER_IMAGE}:${DOCKER_TAG}
+TAG ?= v0.0.75.0
+REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${TAG}
+REPOSITORY_ARCH = ${DOCKER_IMAGE}:${TAG}-${ARCH}
 
-VERSION=$(shell cat .version)
+VERSION ?= 0.0.1
 BUILD=
 
 GOCMD = go
@@ -25,10 +30,35 @@ build:
 	"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"
 
 builder-image:
-	@docker build --network host -t "${BUILDER}" -f build/package/Dockerfile.build .
+	docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
+
+reloader-${ARCH}.tar:
+	docker buildx build --platform ${OS}/${ARCH} --build-arg GOARCH=$(ARCH) -t "${BUILDER}" --load -f build/package/Dockerfile.build .
+	docker run --platform ${OS}/${ARCH} --rm "${BUILDER}" > reloader-${ARCH}.tar
 
 binary-image: builder-image
-	@docker run --network host --rm "${BUILDER}" | docker build --network host -t "${REPOSITORY}" -f Dockerfile.run -
+	cat reloader-${ARCH}.tar | docker buildx build --platform ${OS}/${ARCH} -t "${REPOSITORY_ARCH}" --load -f Dockerfile.run -
+
+push:
+	docker push ${REPOSITORY_ARCH}
+
+release: binary-image push manifest
+
+release-all:
+	-rm -rf ~/.docker/manifests/*
+	# Make arch-specific release
+	@for arch in $(ALL_ARCH) ; do \
+		echo Make release: $$arch ; \
+		make release ARCH=$$arch ; \
+	done
+
+	set -e
+	docker manifest push --purge $(REPOSITORY_GENERIC)
+
+manifest:
+	set -e
+	docker manifest create -a $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH)
+	docker manifest annotate --arch $(ARCH) $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH)
 
 test:
 	"$(GOCMD)" test -timeout 1800s -v ./...
@@ -37,15 +67,29 @@ stop:
 	@docker stop "${BINARY}"
 
 clean-images: stop
-	@docker rmi "${BUILDER}" "${BINARY}"
+	-docker rmi "${BINARY}"
+	@for arch in $(ALL_ARCH) ; do \
+		echo Clean image: $$arch ; \
+		make clean-image ARCH=$$arch ; \
+	done
+	-docker rmi "${REPOSITORY_GENERIC}"
+
+clean-image:
+	-docker rmi "${BUILDER}"
+	-docker rmi "${REPOSITORY_ARCH}"
+	-rm -rf ~/.docker/manifests/*
 
 clean:
 	"$(GOCMD)" clean -i
-
-push: ## push the latest Docker image to DockerHub
-	docker push $(REPOSITORY)
+	-rm -rf reloader-*.tar
 
 apply:
 	kubectl apply -f deployments/manifests/ -n temp-reloader
 
 deploy: binary-image push apply
+
+# Bump Chart
+bump-chart:
+	sed -i "s/^version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
+	sed -i "s/^appVersion:.*/appVersion: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
+	sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
```
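With these targets, `release` builds, pushes, and annotates one architecture, while `release-all` loops over `ALL_ARCH` and then pushes the combined manifest. A usage sketch (the TAG value is an example override of the Makefile default):

```bash
# Release every architecture in ALL_ARCH (arm64 arm amd64) and push
# the multi-arch manifest; TAG overrides the Makefile default.
make release-all TAG=v0.0.91

# Or build and push a single architecture.
make release ARCH=amd64 TAG=v0.0.91
```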
README.md (74 changed lines)

````diff
@@ -13,11 +13,11 @@
 
 ## Problem
 
-We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on the relevant `DeploymentConfig`, `Deployment`, `Daemonset` and `Statefulset`
+We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on the relevant `DeploymentConfig`, `Deployment`, `Daemonset`, `Statefulset` and `Rollout`
 
 ## Solution
 
-Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets` and `Statefulsets`.
+Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets`, `Statefulsets` and `Rollouts`.
 
 ## Compatibility
 
@@ -36,9 +36,44 @@ spec:
   template:
     metadata:
 ```
 
-This will discover deployments/daemonsets/statefulsets automatically where `foo-configmap` or `foo-secret` is being used either via an environment variable or from a volume mount. And it will perform a rolling upgrade on related pods when `foo-configmap` or `foo-secret` are updated.
+This will discover deploymentconfigs/deployments/daemonsets/statefulsets/rollouts automatically where `foo-configmap` or `foo-secret` is being used either via an environment variable or from a volume mount. And it will perform a rolling upgrade on related pods when `foo-configmap` or `foo-secret` are updated.
 
-We can also specify a specific configmap or secret which would trigger a rolling upgrade only upon a change in our specified configmap or secret; this way, it will not trigger a rolling upgrade upon changes in all configmaps or secrets used in a deployment, daemonset or statefulset.
+You can restrict this discovery to only `ConfigMap` or `Secret` objects that
+are tagged with a special annotation. To take advantage of that, annotate
+your deploymentconfigs/deployments/daemonsets/statefulsets/rollouts like this:
+
+```yaml
+kind: Deployment
+metadata:
+  annotations:
+    reloader.stakater.com/search: "true"
+spec:
+  template:
+```
+
+and Reloader will trigger the rolling upgrade upon modification of any
+`ConfigMap` or `Secret` annotated like this:
+
+```yaml
+kind: ConfigMap
+metadata:
+  annotations:
+    reloader.stakater.com/match: "true"
+data:
+  key: value
+```
+
+provided the secret/configmap is being used in an environment variable or a
+volume mount.
+
+Please note that `reloader.stakater.com/search` and
+`reloader.stakater.com/auto` do not work together. If you have the
+`reloader.stakater.com/auto: "true"` annotation on your deployment, then it
+will always restart upon a change in configmaps or secrets it uses, regardless
+of whether they have the `reloader.stakater.com/match: "true"` annotation or
+not.
+
+We can also specify a specific configmap or secret which would trigger a rolling upgrade only upon a change in our specified configmap or secret; this way, it will not trigger a rolling upgrade upon changes in all configmaps or secrets used in a deploymentconfig, deployment, daemonset, statefulset or rollout.
+To do this, either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use the annotations mentioned [here](#Configmap) or [here](#Secret)
 
 ### Configmap
 
@@ -96,13 +131,17 @@ spec:
 ### NOTES
 
 - Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with reloader.
+- For [rollouts](https://github.com/argoproj/argo-rollouts/) reloader simply triggers a change; it is up to you how you configure the rollout strategy.
 - `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`
 - `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in the specified configmap or secret, irrespective of the usage of the configmap or secret.
 - you may override the auto annotation with the `--auto-annotation` flag
+- you may override the search annotation with the `--auto-search-annotation` flag
+  and the match annotation with the `--search-match-annotation` flag
 - you may override the configmap annotation with the `--configmap-annotation` flag
 - you may override the secret annotation with the `--secret-annotation` flag
 - you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
 - you may want to prevent watching certain resources with the `--resources-to-ignore` flag
 - you can configure logging in JSON format with the `--log-format=json` option
 
 ## Deploying to Kubernetes
 
@@ -116,7 +155,7 @@ You can apply vanilla manifests by changing `RELEASE-NAME` placeholder provided
 kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
 ```
 
-By default Reloader gets deployed in the `default` namespace and watches changes to `secrets` and `configmaps` in all namespaces.
+By default, Reloader gets deployed in the `default` namespace and watches changes to `secrets` and `configmaps` in all namespaces.
 
 Reloader can be configured to ignore the resources `secrets` and `configmaps` by passing the following args (`spec.template.spec.containers.args`) to its container:
 
@@ -145,8 +184,6 @@ You can write your own `kustomization.yaml` using ours as a 'base' and write pat
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 
-namePrefix: reloader-
-
 bases:
   - https://github.com/stakater/Reloader/deployments/kubernetes
 
@@ -155,20 +192,20 @@ namespace: reloader
 
 ### Helm Charts
 
-Alternatively, if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using the below mentioned commands
+Alternatively, if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using the below mentioned commands. Follow [this](docs/Helm2-to-Helm3.md) guide in case you have trouble migrating reloader from Helm2 to Helm3
 
 ```bash
 helm repo add stakater https://stakater.github.io/stakater-charts
 
 helm repo update
 
-helm install stakater/reloader
+helm install stakater/reloader # For helm3 add --generate-name flag or set the release name
 ```
 
-**Note:** By default reloader watches in all namespaces. To watch in a single namespace, please run the following command. It will install reloader in the `test` namespace which will only watch `Deployments`, `Daemonsets` and `Statefulsets` in the `test` namespace.
+**Note:** By default reloader watches in all namespaces. To watch in a single namespace, please run the following command. It will install reloader in the `test` namespace which will only watch `Deployments`, `Daemonsets`, `Statefulsets` and `Rollouts` in the `test` namespace.
 
 ```bash
-helm install stakater/reloader --set reloader.watchGlobally=false --namespace test
+helm install stakater/reloader --set reloader.watchGlobally=false --namespace test # For helm3 add --generate-name flag or set the release name
 ```
 
 Reloader can be configured to ignore the resources `secrets` and `configmaps` by using the following parameters of the `values.yaml` file:
@@ -180,11 +217,15 @@ Reloader can be configured to ignore the resources `secrets` and `configmaps` by
 
 `Note`: At one time only one of these resources can be ignored; trying to ignore both will cause an error in helm template compilation.
 
 You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart
 
+You can enable scraping of Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in the values.yaml file. Service monitor will be removed in future releases of reloader in favour of Pod monitor.
+
 ## Help
 
 ### Documentation
 
-You can find more documentation [here](docs/)
+You can find more documentation [here](docs)
 
 ### Have a question?
 
@@ -194,8 +235,8 @@ File a GitHub [issue](https://github.com/stakater/Reloader/issues), or send us a
 
 Join and talk to us on Slack for discussing Reloader
 
-[](https://stakater-slack.herokuapp.com/)
-[](https://stakater.slack.com/messages/CC5S05S12)
+[](https://slack.stakater.com/)
+[](https://stakater-community.slack.com/messages/CC5S05S12)
 
 ## Contributing
 
@@ -205,6 +246,11 @@ Please use the [issue tracker](https://github.com/stakater/Reloader/issues) to r
 
 ### Developing
 
+1. Deploy Reloader.
+2. Run `okteto up` to activate your development container.
+3. `make build`.
+4. `./Reloader`
+
 PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.
 
 1. **Fork** the repo on GitHub
````
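Putting the flags from the NOTES list together, a sketch of a local invocation (all values are examples; `--namespaces-to-ignore` takes a comma-separated list):

```bash
# Combine the documented flags; every value here is illustrative.
./Reloader \
  --log-format=json \
  --namespaces-to-ignore=kube-system,kube-public \
  --resources-to-ignore=secrets
```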
build/package/Dockerfile.build (modified)

```diff
@@ -1,18 +1,23 @@
-FROM golang:1.13.1-alpine
-MAINTAINER "Stakater Team"
+FROM golang:1.15.2-alpine
+LABEL maintainer "Stakater Team"
 
-RUN apk update
+ARG GOARCH=amd64
 
 RUN apk -v --update \
-    add git build-base && \
-    rm -rf /var/cache/apk/* && \
-    mkdir -p "$GOPATH/src/github.com/stakater/Reloader"
+    --no-cache \
+    add git build-base
 
-ADD . "$GOPATH/src/github.com/stakater/Reloader"
 WORKDIR "$GOPATH/src/github.com/stakater/Reloader"
 
-RUN cd "$GOPATH/src/github.com/stakater/Reloader" && \
-    go mod download && \
-    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
+COPY go.mod go.sum ./
+
+RUN go mod download
+
+COPY . .
+
+ENV CGO_ENABLED=0 GOOS=linux GOARCH=$GOARCH
+
+RUN go build -a --installsuffix cgo --ldflags="-s" -o /Reloader
 
 COPY build/package/Dockerfile.run /
```
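This builder stage is what the Makefile's `builder-image` target drives per architecture; a sketch of one such invocation with example values:

```bash
# Build the builder image for a single architecture, as the Makefile's
# builder-image target does (arm64 chosen as an example).
docker buildx build \
  --platform linux/arm64 \
  --build-arg GOARCH=arm64 \
  -t reloader-builder-arm64 \
  --load \
  -f build/package/Dockerfile.build \
  .
```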
build/package/Dockerfile.run (modified)

```diff
@@ -1,8 +1,14 @@
-FROM alpine:3.9
-MAINTAINER "Stakater Team"
+FROM alpine:3.11
+LABEL maintainer "Stakater Team"
 
-RUN apk add --update ca-certificates
+RUN apk add --update --no-cache ca-certificates
 
 COPY Reloader /bin/Reloader
 
+# On alpine 'nobody' has uid 65534
+USER 65534
+
+# Port for metrics and probes
+EXPOSE 9090
+
 ENTRYPOINT ["/bin/Reloader"]
```
deployments/kubernetes/chart/reloader/Chart.yaml (modified)

```diff
@@ -3,27 +3,29 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: v0.0.46
-appVersion: v0.0.46
+version: v0.0.91
+appVersion: v0.0.91
 keywords:
 - Reloader
 - kubernetes
 home: https://github.com/stakater/Reloader
 sources:
-- https://github.com/stakater/IngressMonitorController
+- https://github.com/stakater/IngressMonitorController
 icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
 maintainers:
-- name: Stakater
-  email: hello@stakater.com
-- name: rasheedamir
-  email: rasheed@aurorasolutions.io
-- name: waseem-h
-  email: waseemhassan@stakater.com
-- name: faizanahmad055
-  email: faizan.ahmad55@outlook.com
-- name: kahootali
-  email: ali.kahoot@aurorasolutions.io
-- name: ahmadiq
-  email: ahmad@aurorasolutions.io
-- name: ahsan-storm
-  email: ahsanmuhammad1@outlook.com
+- name: Stakater
+  email: hello@stakater.com
+- name: rasheedamir
+  email: rasheed@aurorasolutions.io
+- name: waseem-h
+  email: waseemhassan@stakater.com
+- name: faizanahmad055
+  email: faizan.ahmad55@outlook.com
+- name: kahootali
+  email: ali.kahoot@aurorasolutions.io
+- name: ahmadiq
+  email: ahmad@aurorasolutions.io
+- name: ahsan-storm
+  email: ahsanmuhammad1@outlook.com
+- name: ahmedwaleedmalik
+  email: waleed@stakater.com
```
deployments/kubernetes/chart/reloader/OWNERS (modified)

```diff
@@ -5,6 +5,7 @@ approvers:
 - waseem-h
 - rasheedamir
 - ahsan-storm
+- ahmedwaleedmalik
 reviewers:
 - faizanahmad055
 - kahootali
@@ -12,3 +13,4 @@ reviewers:
 - waseem-h
 - rasheedamir
 - ahsan-storm
+- ahmedwaleedmalik
```
deployments/kubernetes/chart/reloader/templates/_helpers.tpl (modified)

```diff
@@ -12,15 +12,20 @@ Create a default fully qualified app name.
 We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
 */}}
 {{- define "reloader-fullname" -}}
 {{- if .Values.fullnameOverride -}}
 {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
 {{- else -}}
 {{- $name := default .Chart.Name .Values.nameOverride -}}
 {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
 {{- end -}}
 {{- end -}}
 
 {{- define "reloader-labels.chart" -}}
 app: {{ template "reloader-fullname" . }}
 chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
 release: {{ .Release.Name | quote }}
 heritage: {{ .Release.Service | quote }}
+app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
 {{- end -}}
 
 {{/*
@@ -33,3 +38,11 @@ Create the name of the service account to use
 {{ default "default" .Values.reloader.serviceAccount.name }}
 {{- end -}}
 {{- end -}}
+
+{{/*
+Create the annotations to support helm3
+*/}}
+{{- define "reloader-helm3.annotations" -}}
+meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
+meta.helm.sh/release-name: {{ .Release.Name | quote }}
+{{- end -}}
```
deployments/kubernetes/chart/reloader/templates/clusterrole.yaml (modified)

```diff
@@ -1,7 +1,13 @@
 {{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
-apiVersion: rbac.authorization.k8s.io/v1
+{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{ else }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+{{- end }}
 kind: ClusterRole
 metadata:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
@@ -26,7 +32,7 @@ rules:
   - list
   - get
   - watch
-  {{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
+  {{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
   - apiGroups:
     - "apps.openshift.io"
     - ""
@@ -37,6 +43,18 @@ rules:
   - get
   - update
   - patch
   {{- end }}
+  {{- if or (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
+  - apiGroups:
+    - "argoproj.io"
+    - ""
+    resources:
+    - rollouts
+    verbs:
+    - list
+    - get
+    - update
+    - patch
+  {{- end }}
   - apiGroups:
     - "apps"
```
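After installing the chart with the Rollouts rules enabled, the grant can be checked from the cluster side. A sketch, assuming the service account name and namespace used by the vanilla manifests (`reloader-reloader` in `default`):

```bash
# Confirm the ClusterRole actually lets Reloader patch Argo Rollouts.
kubectl auth can-i patch rollouts.argoproj.io \
  --as=system:serviceaccount:default:reloader-reloader
```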
deployments/kubernetes/chart/reloader/templates/clusterrolebinding.yaml (modified)

```diff
@@ -1,7 +1,13 @@
 {{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
-apiVersion: rbac.authorization.k8s.io/v1
+{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{ else }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+{{- end }}
 kind: ClusterRoleBinding
 metadata:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
```
deployments/kubernetes/chart/reloader/templates/deployment.yaml (modified)

```diff
@@ -1,8 +1,9 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-{{- if .Values.reloader.deployment.annotations }}
   annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
+{{- if .Values.reloader.deployment.annotations }}
 {{ toYaml .Values.reloader.deployment.annotations | indent 4 }}
 {{- end }}
   labels:
@@ -26,6 +27,10 @@ spec:
 {{- end }}
   template:
     metadata:
+{{- if .Values.reloader.deployment.pod.annotations }}
+      annotations:
+{{ toYaml .Values.reloader.deployment.pod.annotations | indent 8 }}
+{{- end }}
       labels:
 {{ include "reloader-labels.chart" . | indent 8 }}
 {{- if .Values.reloader.deployment.labels }}
@@ -46,9 +51,16 @@ spec:
 {{- if .Values.reloader.deployment.tolerations }}
       tolerations:
 {{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
 {{- end }}
+{{- if .Values.reloader.deployment.priorityClassName }}
+      priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
+{{- end }}
       containers:
-      - env:
+      - image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
+        imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
+        name: {{ template "reloader-fullname" . }}
+{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) }}
+        env:
 {{- range $name, $value := .Values.reloader.deployment.env.open }}
 {{- if not (empty $value) }}
         - name: {{ $name | quote }}
@@ -79,37 +91,66 @@ spec:
             fieldRef:
               fieldPath: metadata.namespace
 {{- end }}
-        image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
-        imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
-        name: {{ template "reloader-fullname" . }}
+{{- end }}
+
+        ports:
+        - name: http
+          containerPort: 9090
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: http
+        readinessProbe:
+          httpGet:
+            path: /metrics
+            port: http
+
 {{- if eq .Values.reloader.readOnlyRootFileSystem true }}
         volumeMounts:
           - mountPath: /tmp/
             name: tmp-volume
 {{- end }}
 {{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) }}
         args:
 {{- if .Values.reloader.logFormat }}
           - "--log-format={{ .Values.reloader.logFormat }}"
 {{- end }}
 {{- if .Values.reloader.ignoreSecrets }}
           - "--resources-to-ignore=secrets"
 {{- end }}
-{{- if eq .Values.reloader.ignoreConfigMaps true }}
+{{- if .Values.reloader.ignoreConfigMaps }}
           - "--resources-to-ignore=configMaps"
 {{- end }}
 {{- if .Values.reloader.ignoreNamespaces }}
           - "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
 {{- end }}
+
 {{- if .Values.reloader.custom_annotations }}
 {{- if .Values.reloader.custom_annotations.configmap }}
-        - "--configmap-annotation"
-        - "{{ .Values.reloader.custom_annotations.configmap }}"
+          - "--configmap-annotation"
+          - "{{ .Values.reloader.custom_annotations.configmap }}"
 {{- end }}
 {{- if .Values.reloader.custom_annotations.secret }}
-        - "--secret-annotation"
-        - "{{ .Values.reloader.custom_annotations.secret }}"
+          - "--secret-annotation"
+          - "{{ .Values.reloader.custom_annotations.secret }}"
 {{- end }}
 {{- if .Values.reloader.custom_annotations.auto }}
-        - "--auto-annotation"
-        - "{{ .Values.reloader.custom_annotations.auto }}"
+          - "--auto-annotation"
+          - "{{ .Values.reloader.custom_annotations.auto }}"
 {{- end }}
+{{- if .Values.reloader.custom_annotations.search }}
+          - "--auto-search-annotation"
+          - "{{ .Values.reloader.custom_annotations.search }}"
+{{- end }}
+{{- if .Values.reloader.custom_annotations.match }}
+          - "--search-match-annotation"
+          - "{{ .Values.reloader.custom_annotations.match }}"
+{{- end }}
 {{- end }}
+
+{{- if eq .Values.reloader.isArgoRollouts true }}
+          - "--is-Argo-Rollouts={{ .Values.reloader.isArgoRollouts }}"
+{{- end }}
 {{- end }}
 {{- if .Values.reloader.deployment.resources }}
         resources:
 {{ toYaml .Values.reloader.deployment.resources | indent 10 }}
```
deployments/kubernetes/chart/reloader/templates/podmonitor.yaml (new file, 31 lines)

```yaml
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.podMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.podMonitor.labels }}
{{ toYaml .Values.reloader.podMonitor.labels | indent 4}}
{{- end }}
  name: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.podMonitor.namespace }}
  namespace: {{ .Values.reloader.podMonitor.namespace }}
{{- end }}
spec:
  podMetricsEndpoints:
  - port: http
    path: "/metrics"
{{- if .Values.reloader.podMonitor.interval }}
    interval: {{ .Values.reloader.podMonitor.interval }}
{{- end }}
{{- if .Values.reloader.podMonitor.timeout }}
    scrapeTimeout: {{ .Values.reloader.podMonitor.timeout }}
{{- end }}
  jobLabel: {{ template "reloader-fullname" . }}
  namespaceSelector:
    matchNames:
    - {{ .Release.Namespace }}
  selector:
    matchLabels:
{{ include "reloader-labels.chart" . | nindent 6 }}
{{- end }}
```
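The template is gated on `reloader.podMonitor.enabled`, so switching it on is a values override at install time. A sketch using helm3 syntax (the release name is an example):

```bash
# Render the PodMonitor by enabling it in values; this requires the
# Prometheus Operator CRDs (monitoring.coreos.com/v1) on the cluster.
helm install reloader stakater/reloader \
  --set reloader.podMonitor.enabled=true
```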
deployments/kubernetes/chart/reloader/templates/role.yaml (modified)

```diff
@@ -1,7 +1,13 @@
 {{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
-apiVersion: rbac.authorization.k8s.io/v1
+{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{ else }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+{{- end }}
 kind: Role
 metadata:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
@@ -26,7 +32,7 @@ rules:
   - list
   - get
   - watch
-  {{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.isOpenshift) }}
+  {{- if or (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
   - apiGroups:
     - "apps.openshift.io"
     - ""
@@ -37,6 +43,18 @@ rules:
   - get
   - update
   - patch
   {{- end }}
+  {{- if or (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
+  - apiGroups:
+    - "argoproj.io"
+    - ""
+    resources:
+    - rollouts
+    verbs:
+    - list
+    - get
+    - update
+    - patch
+  {{- end }}
   - apiGroups:
     - "apps"
```
deployments/kubernetes/chart/reloader/templates/rolebinding.yaml (modified)

```diff
@@ -1,8 +1,14 @@
 {{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
-apiVersion: rbac.authorization.k8s.io/v1
+{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{ else }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+{{- end }}
 kind: RoleBinding
 metadata:
-  labels:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
+  labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.rbac.labels }}
 {{ toYaml .Values.reloader.rbac.labels | indent 4 }}
```
deployments/kubernetes/chart/reloader/templates/service.yaml (new file, 29 lines)

```yaml
{{- if .Values.reloader.service }}
apiVersion: v1
kind: Service
metadata:
  annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.service.annotations }}
{{ toYaml .Values.reloader.service.annotations | indent 4 }}
{{- end }}
  labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.service.labels }}
{{ toYaml .Values.reloader.service.labels | indent 4 }}
{{- end }}
  name: {{ template "reloader-fullname" . }}
spec:
  selector:
{{- if .Values.reloader.deployment.labels }}
{{ toYaml .Values.reloader.deployment.labels | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
  ports:
    - port: {{ .Values.reloader.service.port }}
      name: http
      protocol: TCP
      targetPort: http
{{- end }}
```
deployments/kubernetes/chart/reloader/templates/serviceaccount.yaml (modified)

```diff
@@ -5,6 +5,11 @@ kind: ServiceAccount
 imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
 {{- end }}
 metadata:
+  annotations:
+{{ include "reloader-helm3.annotations" . | indent 4 }}
+{{- if .Values.reloader.serviceAccount.annotations }}
+{{ toYaml .Values.reloader.serviceAccount.annotations | indent 4 }}
+{{- end }}
   labels:
 {{ include "reloader-labels.chart" . | indent 4 }}
 {{- if .Values.reloader.serviceAccount.labels }}
```
deployments/kubernetes/chart/reloader/templates/servicemonitor.yaml (new file, 31 lines)

```yaml
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.serviceMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.serviceMonitor.labels }}
{{ toYaml .Values.reloader.serviceMonitor.labels | indent 4}}
{{- end }}
  name: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.serviceMonitor.namespace }}
  namespace: {{ .Values.reloader.serviceMonitor.namespace }}
{{- end }}
spec:
  endpoints:
  - targetPort: http
    path: "/metrics"
{{- if .Values.reloader.serviceMonitor.interval }}
    interval: {{ .Values.reloader.serviceMonitor.interval }}
{{- end }}
{{- if .Values.reloader.serviceMonitor.timeout }}
    scrapeTimeout: {{ .Values.reloader.serviceMonitor.timeout }}
{{- end }}
  jobLabel: {{ template "reloader-fullname" . }}
  namespaceSelector:
    matchNames:
    - {{ .Release.Namespace }}
  selector:
    matchLabels:
{{ include "reloader-labels.chart" . | nindent 6 }}
{{- end }}
```
deployments/kubernetes/chart/reloader/values.yaml (modified)

```diff
@@ -9,12 +9,17 @@ kubernetes:
   host: https://kubernetes.default
 
 reloader:
+  isArgoRollouts: false
   isOpenshift: false
   ignoreSecrets: false
   ignoreConfigMaps: false
   ignoreNamespaces: "" # Comma separated list of namespaces to ignore
   logFormat: "" #json
   watchGlobally: true
+  # Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
+  readOnlyRootFileSystem: false
   legacy:
     rbac: false
+  matchLabels: {}
   deployment:
     nodeSelector:
@@ -31,6 +36,10 @@ reloader:
     #     operator: "Exists"
     affinity: {}
 
+    securityContext:
+      runAsNonRoot: true
+      runAsUser: 65534
+
     # A list of tolerations to be applied to the Deployment.
     # Example:
     # tolerations:
@@ -43,10 +52,10 @@ reloader:
     labels:
       provider: stakater
       group: com.stakater.platform
-      version: v0.0.46
+      version: v0.0.77
     image:
       name: stakater/reloader
-      tag: "v0.0.46"
+      tag: v0.0.91
       pullPolicy: IfNotPresent
     # Support for extra environment variables.
     env:
@@ -67,6 +76,14 @@ reloader:
     #   cpu: "10m"
     #   memory: "128Mi"
     resources: {}
+    pod:
+      annotations: {}
+    priorityClassName: ""
+
+  service: {}
+    # labels: {}
+    # annotations: {}
+    # port: 9090
 
   rbac:
     enabled: true
@@ -76,12 +93,36 @@ reloader:
   serviceAccount:
     # Specifies whether a ServiceAccount should be created
     create: true
     labels: {}
+    annotations: {}
     # The name of the ServiceAccount to use.
     # If not set and create is true, a name is generated using the fullname template
-    name:
+    name:
+  # Optional flags to pass to the Reloader entrypoint
+  # Example:
+  # custom_annotations:
+  #   configmap: "my.company.com/configmap"
+  #   secret: "my.company.com/secret"
   custom_annotations: {}
+  serviceMonitor:
+    # enabling this requires service to be enabled as well, or no endpoints will be found
+    enabled: false
+    # Set the namespace the ServiceMonitor should be deployed
+    # namespace: monitoring
+    # Set how frequently Prometheus should scrape
+    # interval: 30s
+    # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
+    # labels:
+    # Set timeout for scrape
+    # timeout: 10s
+
+  podMonitor:
+    # enabling this requires service to be enabled as well, or no endpoints will be found
+    enabled: false
+    # Set the namespace the podMonitor should be deployed
+    # namespace: monitoring
+    # Set how frequently Prometheus should scrape
+    # interval: 30s
+    # Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
+    # labels:
+    # Set timeout for scrape
+    # timeout: 10s
```
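Any of the values above can be overridden per install. A sketch combining a few of the new keys (helm3 syntax; all values are examples):

```bash
# Override selected values from values.yaml at install time.
helm install reloader stakater/reloader \
  --set reloader.isArgoRollouts=true \
  --set reloader.logFormat=json \
  --set reloader.deployment.priorityClassName=high-priority
```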
deployments/kubernetes/manifests/clusterrole.yaml (modified)

```diff
@@ -4,11 +4,15 @@
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.46"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
   name: reloader-reloader-role
   namespace: default
 rules:
```
deployments/kubernetes/manifests/clusterrolebinding.yaml (modified)

```diff
@@ -4,11 +4,15 @@
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.46"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
   name: reloader-reloader-role-binding
   namespace: default
 roleRef:
```
deployments/kubernetes/manifests/deployment.yaml (modified)

```diff
@@ -3,14 +3,18 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.46"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
     group: com.stakater.platform
     provider: stakater
-    version: v0.0.46
+    version: v0.0.77
 
   name: reloader-reloader
 spec:
@@ -24,19 +28,34 @@ spec:
     metadata:
       labels:
         app: reloader-reloader
-        chart: "reloader-v0.0.46"
+        chart: "reloader-v0.0.77"
         release: "reloader"
         heritage: "Tiller"
+        app.kubernetes.io/managed-by: "Tiller"
         group: com.stakater.platform
         provider: stakater
-        version: v0.0.46
+        version: v0.0.77
 
     spec:
       containers:
-      - env:
-        image: "stakater/reloader:v0.0.46"
+      - image: "stakater/reloader:v0.0.77"
         imagePullPolicy: IfNotPresent
         name: reloader-reloader
-        args:
 
+        ports:
+        - name: http
+          containerPort: 9090
+        livenessProbe:
+          httpGet:
+            path: /metrics
+            port: http
+        readinessProbe:
+          httpGet:
+            path: /metrics
+            port: http
+        securityContext:
+          runAsNonRoot: true
+          runAsUser: 65534
 
       serviceAccountName: reloader-reloader
```
deployments/kubernetes/manifests/podmonitor.yaml (new file, 3 lines)

```yaml
---
# Source: reloader/templates/podmonitor.yaml
```
deployments/kubernetes/manifests/service.yaml (new file, 4 lines)

```yaml
---
# Source: reloader/templates/service.yaml
```
deployments/kubernetes/manifests/serviceaccount.yaml (modified)

```diff
@@ -4,10 +4,14 @@
 apiVersion: v1
 kind: ServiceAccount
 metadata:
+  annotations:
+    meta.helm.sh/release-namespace: "default"
+    meta.helm.sh/release-name: "reloader"
   labels:
     app: reloader-reloader
-    chart: "reloader-v0.0.46"
+    chart: "reloader-v0.0.77"
     release: "reloader"
     heritage: "Tiller"
+    app.kubernetes.io/managed-by: "Tiller"
   name: reloader-reloader
```
deployments/kubernetes/manifests/servicemonitor.yaml (new file, 4 lines)

```yaml
---
# Source: reloader/templates/servicemonitor.yaml
```
@@ -1,60 +1,18 @@
---
# Source: reloader/templates/role.yaml

---
# Source: reloader/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: reloader-reloader
    chart: "reloader-v0.0.46"
    release: "reloader"
    heritage: "Tiller"
    group: com.stakater.platform
    provider: stakater
    version: v0.0.46

  name: reloader-reloader
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: reloader-reloader
      release: "reloader"
  template:
    metadata:
      labels:
        app: reloader-reloader
        chart: "reloader-v0.0.46"
        release: "reloader"
        heritage: "Tiller"
        group: com.stakater.platform
        provider: stakater
        version: v0.0.46

    spec:
      containers:
      - env:
        image: "stakater/reloader:v0.0.46"
        imagePullPolicy: IfNotPresent
        name: reloader-reloader
        args:
      serviceAccountName: reloader-reloader

---
# Source: reloader/templates/clusterrole.yaml

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  annotations:
    meta.helm.sh/release-namespace: "default"
    meta.helm.sh/release-name: "reloader"
  labels:
    app: reloader-reloader
    chart: "reloader-v0.0.46"
    chart: "reloader-v0.0.77"
    release: "reloader"
    heritage: "Tiller"
    app.kubernetes.io/managed-by: "Tiller"
  name: reloader-reloader-role
  namespace: default
rules:
@@ -89,21 +47,21 @@ rules:
  - update
  - patch

---
# Source: reloader/templates/rolebinding.yaml

---
# Source: reloader/templates/clusterrolebinding.yaml

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  annotations:
    meta.helm.sh/release-namespace: "default"
    meta.helm.sh/release-name: "reloader"
  labels:
    app: reloader-reloader
    chart: "reloader-v0.0.46"
    chart: "reloader-v0.0.77"
    release: "reloader"
    heritage: "Tiller"
    app.kubernetes.io/managed-by: "Tiller"
  name: reloader-reloader-role-binding
  namespace: default
roleRef:
@@ -115,16 +73,97 @@ subjects:
  name: reloader-reloader
  namespace: default

---
# Source: reloader/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    meta.helm.sh/release-namespace: "default"
    meta.helm.sh/release-name: "reloader"
  labels:
    app: reloader-reloader
    chart: "reloader-v0.0.77"
    release: "reloader"
    heritage: "Tiller"
    app.kubernetes.io/managed-by: "Tiller"
    group: com.stakater.platform
    provider: stakater
    version: v0.0.77

  name: reloader-reloader
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: reloader-reloader
      release: "reloader"
  template:
    metadata:
      labels:
        app: reloader-reloader
        chart: "reloader-v0.0.77"
        release: "reloader"
        heritage: "Tiller"
        app.kubernetes.io/managed-by: "Tiller"
        group: com.stakater.platform
        provider: stakater
        version: v0.0.77

    spec:
      containers:
      - image: "stakater/reloader:v0.0.77"
        imagePullPolicy: IfNotPresent
        name: reloader-reloader

        ports:
        - name: http
          containerPort: 9090
        livenessProbe:
          httpGet:
            path: /metrics
            port: http
        readinessProbe:
          httpGet:
            path: /metrics
            port: http
        securityContext:
          runAsNonRoot: true
          runAsUser: 65534

      serviceAccountName: reloader-reloader

---
# Source: reloader/templates/role.yaml

---
# Source: reloader/templates/rolebinding.yaml

---
# Source: reloader/templates/service.yaml

---
# Source: reloader/templates/serviceaccount.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  annotations:
    meta.helm.sh/release-namespace: "default"
    meta.helm.sh/release-name: "reloader"
  labels:
    app: reloader-reloader
    chart: "reloader-v0.0.46"
    chart: "reloader-v0.0.77"
    release: "reloader"
    heritage: "Tiller"
    app.kubernetes.io/managed-by: "Tiller"
  name: reloader-reloader

---
# Source: reloader/templates/servicemonitor.yaml
@@ -9,12 +9,17 @@ kubernetes:
  host: https://kubernetes.default

reloader:
  isArgoRollouts: false
  isOpenshift: false
  ignoreSecrets: false
  ignoreConfigMaps: false
  ignoreNamespaces: "" # Comma-separated list of namespaces to ignore
  logFormat: "" # json
  watchGlobally: true
  # Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
  readOnlyRootFileSystem: false
  legacy:
    rbac: false
  matchLabels: {}
  deployment:
    nodeSelector:
@@ -31,6 +36,10 @@ reloader:
    #   operator: "Exists"
    affinity: {}

    securityContext:
      runAsNonRoot: true
      runAsUser: 65534

    # A list of tolerations to be applied to the Deployment.
    # Example:
    # tolerations:
@@ -67,6 +76,13 @@ reloader:
    #     cpu: "10m"
    #     memory: "128Mi"
    resources: {}
    pod:
      annotations: {}

  service: {}
    # labels: {}
    # annotations: {}
    # port: 9090

  rbac:
    enabled: true
@@ -76,6 +92,7 @@ reloader:
    # Specifies whether a ServiceAccount should be created
    create: true
    labels: {}
    annotations: {}
    # The name of the ServiceAccount to use.
    # If not set and create is true, a name is generated using the fullname template
    name:
@@ -85,3 +102,27 @@ reloader:
  # configmap: "my.company.com/configmap"
  # secret: "my.company.com/secret"
  custom_annotations: {}
  serviceMonitor:
    # Enabling this requires the service to be enabled as well, or no endpoints will be found
    enabled: false
    # Set the namespace the ServiceMonitor should be deployed in
    # namespace: monitoring
    # Set how frequently Prometheus should scrape
    # interval: 30s
    # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
    # labels:
    # Set timeout for scrape
    # timeout: 10s

  podMonitor:
    # Enabling this requires the service to be enabled as well, or no endpoints will be found
    enabled: false
    # Set the namespace the PodMonitor should be deployed in
    # namespace: monitoring
    # Set how frequently Prometheus should scrape
    # interval: 30s
    # Set labels for the PodMonitor, use this to define your scrape label for Prometheus Operator
    # labels:
    # Set timeout for scrape
    # timeout: 10s
docs/Container Build.md (new file, 41 lines)
@@ -0,0 +1,41 @@
# Container Build

> **WARNING:** As a user of Reloader there is no need to build containers; they are freely available here: https://hub.docker.com/r/stakater/reloader/

The multi-architecture approach is based on original work by @mdh02038: https://github.com/mdh02038/Reloader

Images are tested on linux/arm, linux/arm64 and linux/amd64.

# Install Pre-Reqs

The build environment requires the following packages (tested on Ubuntu 20.04):

* golang
* make
* qemu (for arm, arm64 etc. emulation)
* binfmt-support
* Docker engine

## Docker

Follow the instructions here: https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository

Once installed, enable the experimental CLI:

```
export DOCKER_CLI_EXPERIMENTAL=enabled
```

Log in to enable publishing of packages:

```
sudo docker login
```

## Remaining Pre-Reqs

The remaining pre-reqs can be installed via:

```
sudo apt install golang make qemu-user-static binfmt-support -y
```

# Publish Multi-Architecture Image

To build and publish multi-arch Docker images, clone the repository and execute from the repository root:

```
sudo make release-all
```

# Additional Links/Info

* https://medium.com/@artur.klauser/building-multi-architecture-docker-images-with-buildx-27d80f7e2408
docs/Helm2-to-Helm3.md (new file, 62 lines)
@@ -0,0 +1,62 @@
# Helm2 to Helm3 Migration

Follow the instructions below to migrate Reloader from Helm2 to Helm3.

## Instructions

There are 3 steps involved in migrating Reloader from Helm2 to Helm3.

### Step 1:
Install the helm-2to3 plugin, then convert and clean up the release:

```bash
helm3 plugin install https://github.com/helm/helm-2to3

helm3 2to3 convert <release-name>

helm3 2to3 cleanup --release-cleanup --skip-confirmation
```

### Step 2:
Add the following Helm3 label and annotations on Reloader resources.

Label:

```yaml
app.kubernetes.io/managed-by=Helm
```
Annotations:
```yaml
meta.helm.sh/release-name=<release-name>
meta.helm.sh/release-namespace=<namespace>
```

For example, to label and annotate the ClusterRoleBinding and ClusterRole:

```bash
KIND=ClusterRoleBinding
NAME=reloader-reloader-role-binding
RELEASE=reloader
NAMESPACE=kube-system
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm

KIND=ClusterRole
NAME=reloader-reloader-role
RELEASE=reloader
NAMESPACE=kube-system
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm
```

### Step 3:
Upgrade to the desired version:
```bash
helm3 repo add stakater https://stakater.github.io/stakater-charts

helm3 repo update

helm3 upgrade <release-name> stakater/reloader --version=v0.0.72
```
@@ -37,7 +37,7 @@ metadata:
```
<small>*the default annotation can be changed with the `--secret-annotation` flag</small>

The above mentioned annotations also work for `Daemonsets` and `Statefulsets`
The above mentioned annotations also work for `Daemonsets`, `Statefulsets` and `Rollouts`

## How Rolling upgrade works?
@@ -8,5 +8,5 @@ Reloader is inspired from [Configmapcontroller](https://github.com/fabric8io/con
| Reloader can watch both `secrets` and `configmaps`. | ConfigmapController can only watch changes in `configmaps`. It cannot detect changes in other resources like `secrets`. |
| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | ConfigmapController can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` |
| Reloader provides both unit test cases and end-to-end integration test cases for future updates, so one can make sure that new changes do not break any old functionality. | Currently there are no unit test cases or end-to-end integration test cases in ConfigmapController. This makes additional updates harder, and one cannot know for sure whether new changes break any old functionality. |
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less pron to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |
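The SHA1-based tracking described in the table lends itself to a small illustration. The following is a minimal sketch in Go; the helper name and the env-var derivation are assumptions made for illustration, not Reloader's actual code:

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"sort"
	"strings"
)

// configMapSHA is a hypothetical helper: it hashes a configmap's data in a
// deterministic order, the way a controller could derive a 40-character value.
func configMapSHA(data map[string]string) string {
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}
	sort.Strings(keys) // map iteration order is random; sort for stability
	h := sha1.New()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write([]byte(data[k]))
	}
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	sha := configMapSHA(map[string]string{"url": "www.stakater.com"})
	// Env var name follows the STAKATER_<NAME>_CONFIGMAP pattern from the table;
	// "foo" is a placeholder resource name.
	envName := "STAKATER_" + strings.ToUpper(strings.ReplaceAll("foo", "-", "_")) + "_CONFIGMAP"
	fmt.Printf("%s=%s (40 hex chars: %d)\n", envName, sha, len(sha))
}
```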
@@ -8,4 +8,4 @@ Below are the steps to use reloader with Sealed Secrets.
8. Install Reloader.
9. Once everything is set up, update the original secret at the client and encrypt it with kubeseal to see Reloader working.
10. Apply the updated sealed secret.
11. Reloader will resatart the pod to use that updated secret.
11. Reloader will restart the pod to use that updated secret.
@@ -1,6 +1,6 @@
# Verify Reloader's Working

Reloader's working can be verified by two ways.
Reloader's working can be verified in three ways.

## Verify from logs

@@ -49,3 +49,13 @@ After a change in `secret` or `configmap`. Run the below mentioned command and v
```bash
kubectl get pods <pod name> -n <namespace name>
```

## Verify from metrics

Some metrics are exported to the Prometheus endpoint `/metrics` on port `9090`.

When Reloader is unable to reload, the `reloader_reload_executed_total{success="false"}` metric gets incremented, and when it reloads successfully, `reloader_reload_executed_total{success="true"}` gets incremented. You will be able to see the following metrics, along with some other metrics, at the `/metrics` endpoint.

```
reloader_reload_executed_total{success="false"} 15
reloader_reload_executed_total{success="true"} 12
```
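For readers who want to consume this counter programmatically rather than via Prometheus, a small Go sketch is shown below. It assumes the Reloader pod's port 9090 is reachable (for example via `kubectl port-forward`); the URL is an assumption for illustration:

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

// Fetch /metrics and print only the reload counters discussed above.
func main() {
	resp, err := http.Get("http://localhost:9090/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		// Lines look like: reloader_reload_executed_total{success="true"} 12
		if strings.HasPrefix(line, "reloader_reload_executed_total") {
			fmt.Println(line)
		}
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
}
```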
@@ -2,6 +2,7 @@

These are the key features of Reloader:

1. Restart pod in a depoloyment on change in linked/related configmap's or secret's
1. Restart pods in a deployment on a change in linked/related configmaps or secrets
2. Restart pods in a daemonset on a change in linked/related configmaps or secrets
3. Restart pods in a statefulset on a change in linked/related configmaps or secrets
4. Restart pods in a rollout on a change in linked/related configmaps or secrets
go.mod (57 lines changed)
@@ -1,29 +1,44 @@
module github.com/stakater/Reloader

go 1.13
go 1.15

require (
	github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
	github.com/onsi/ginkgo v1.10.2 // indirect
	github.com/onsi/gomega v1.7.0 // indirect
	github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible
	github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372
	github.com/sirupsen/logrus v1.0.5
	github.com/spf13/cobra v0.0.0-20160722081547-f62e98d28ab7
	github.com/stretchr/testify v1.4.0 // indirect
	gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
	gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
	k8s.io/api v0.0.0-20190918155943-95b840bb6a1f
	k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8
	k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90
	github.com/argoproj/argo-rollouts v1.0.1
	github.com/onsi/ginkgo v1.15.1 // indirect
	github.com/onsi/gomega v1.11.0 // indirect
	github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
	github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
	github.com/prometheus/client_golang v1.10.0
	github.com/sirupsen/logrus v1.7.0
	github.com/spf13/cobra v1.1.3
	k8s.io/api v0.21.1
	k8s.io/apimachinery v0.21.1
	k8s.io/client-go v0.21.1
)

replace (
	github.com/openshift/api => github.com/openshift/api v3.9.1-0.20190923092516-169848dd8137+incompatible // prebase-1.16
	github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20190923092832-6afefc9bb372 // prebase-1.16
	k8s.io/api => k8s.io/api v0.0.0-20191004120104-195af9ec3521 // release-1.16
	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8 // kubernetes-1.16.0
	k8s.io/client-go => k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 // kubernetes-1.16.0
	k8s.io/api => k8s.io/api v0.20.4
	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.4
	k8s.io/apimachinery => k8s.io/apimachinery v0.21.0-alpha.0
	k8s.io/apiserver => k8s.io/apiserver v0.20.4
	k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.4
	k8s.io/client-go => k8s.io/client-go v0.20.4
	k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.4
	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.4
	k8s.io/code-generator => k8s.io/code-generator v0.20.5-rc.0
	k8s.io/component-base => k8s.io/component-base v0.20.4
	k8s.io/component-helpers => k8s.io/component-helpers v0.20.4
	k8s.io/controller-manager => k8s.io/controller-manager v0.20.4
	k8s.io/cri-api => k8s.io/cri-api v0.20.5-rc.0
	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.4
	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.4
	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.4
	k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.4
	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.4
	k8s.io/kubectl => k8s.io/kubectl v0.20.4
	k8s.io/kubelet => k8s.io/kubelet v0.20.4
	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.4
	k8s.io/metrics => k8s.io/metrics v0.20.4
	k8s.io/mount-utils => k8s.io/mount-utils v0.20.5-rc.0
	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.4
)
@@ -1,6 +1,7 @@
package callbacks

import (
	"context"
	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
@@ -8,6 +9,7 @@ import (
	v1 "k8s.io/api/core/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
	openshiftv1 "github.com/openshift/api/apps/v1"
)
@@ -26,9 +28,17 @@ type VolumesFunc func(interface{}) []v1.Volume
//UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, interface{}) error

//AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(interface{}) map[string]string

//PodAnnotationsFunc is a generic func to return pod annotations
type PodAnnotationsFunc func(interface{}) map[string]string

//RollingUpgradeFuncs contains generic functions to perform rolling upgrade
type RollingUpgradeFuncs struct {
	ItemsFunc          ItemsFunc
	AnnotationsFunc    AnnotationsFunc
	PodAnnotationsFunc PodAnnotationsFunc
	ContainersFunc     ContainersFunc
	InitContainersFunc InitContainersFunc
	UpdateFunc         UpdateFunc
@@ -38,7 +48,7 @@ type RollingUpgradeFuncs struct {

// GetDeploymentItems returns the deployments in given namespace
func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
	deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(meta_v1.ListOptions{})
	deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list deployments %v", err)
	}
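The recurring change in these hunks is the client-go 0.21 API: List, Get and Update now take a context.Context as the first argument. A minimal sketch of the new call shape follows; it assumes in-cluster execution, and the namespace is illustrative:

```go
package main

import (
	"context"
	"fmt"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes the process runs inside a cluster
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// client-go >= 0.18 threads a context through every request;
	// context.TODO() is the drop-in used throughout these hunks.
	deployments, err := client.AppsV1().Deployments("default").List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range deployments.Items {
		fmt.Println(d.Name)
	}
}
```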
@@ -47,7 +57,7 @@ func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {

// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
	daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(meta_v1.ListOptions{})
	daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list daemonSets %v", err)
	}
@@ -56,7 +66,7 @@ func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {

// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
	statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(meta_v1.ListOptions{})
	statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list statefulSets %v", err)
	}
@@ -65,25 +75,84 @@ func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {

// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
	deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(meta_v1.ListOptions{})
	deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list deploymentConfigs %v", err)
	}
	return util.InterfaceSlice(deploymentConfigs.Items)
}

// GetRolloutItems returns the rollouts in given namespace
func GetRolloutItems(clients kube.Clients, namespace string) []interface{} {
	rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{})
	if err != nil {
		logrus.Errorf("Failed to list Rollouts %v", err)
	}
	return util.InterfaceSlice(rollouts.Items)
}

// GetDeploymentAnnotations returns the annotations of given deployment
func GetDeploymentAnnotations(item interface{}) map[string]string {
	return item.(appsv1.Deployment).ObjectMeta.Annotations
}

// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item interface{}) map[string]string {
	return item.(appsv1.DaemonSet).ObjectMeta.Annotations
}

// GetStatefulSetAnnotations returns the annotations of given statefulSet
func GetStatefulSetAnnotations(item interface{}) map[string]string {
	return item.(appsv1.StatefulSet).ObjectMeta.Annotations
}

// GetDeploymentConfigAnnotations returns the annotations of given deploymentConfig
func GetDeploymentConfigAnnotations(item interface{}) map[string]string {
	return item.(openshiftv1.DeploymentConfig).ObjectMeta.Annotations
}

// GetRolloutAnnotations returns the annotations of given rollout
func GetRolloutAnnotations(item interface{}) map[string]string {
	return item.(argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
}

// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
func GetDeploymentPodAnnotations(item interface{}) map[string]string {
	return item.(appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
}

// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item interface{}) map[string]string {
	return item.(appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
}

// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
func GetStatefulSetPodAnnotations(item interface{}) map[string]string {
	return item.(appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
}

// GetDeploymentConfigPodAnnotations returns the pod's annotations of given deploymentConfig
func GetDeploymentConfigPodAnnotations(item interface{}) map[string]string {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
}

// GetRolloutPodAnnotations returns the pod's annotations of given rollout
func GetRolloutPodAnnotations(item interface{}) map[string]string {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
}

// GetDeploymentContainers returns the containers of given deployment
func GetDeploymentContainers(item interface{}) []v1.Container {
	return item.(appsv1.Deployment).Spec.Template.Spec.Containers
}

// GetDaemonSetContainers returns the containers of given daemonset
// GetDaemonSetContainers returns the containers of given daemonSet
func GetDaemonSetContainers(item interface{}) []v1.Container {
	return item.(appsv1.DaemonSet).Spec.Template.Spec.Containers
}

// GetStatefulsetContainers returns the containers of given statefulSet
func GetStatefulsetContainers(item interface{}) []v1.Container {
// GetStatefulSetContainers returns the containers of given statefulSet
func GetStatefulSetContainers(item interface{}) []v1.Container {
	return item.(appsv1.StatefulSet).Spec.Template.Spec.Containers
}
@@ -92,18 +161,23 @@ func GetDeploymentConfigContainers(item interface{}) []v1.Container {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
}

// GetRolloutContainers returns the containers of given rollout
func GetRolloutContainers(item interface{}) []v1.Container {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
}

// GetDeploymentInitContainers returns the init containers of given deployment
func GetDeploymentInitContainers(item interface{}) []v1.Container {
	return item.(appsv1.Deployment).Spec.Template.Spec.InitContainers
}

// GetDaemonSetInitContainers returns the containers of given daemonset
// GetDaemonSetInitContainers returns the init containers of given daemonSet
func GetDaemonSetInitContainers(item interface{}) []v1.Container {
	return item.(appsv1.DaemonSet).Spec.Template.Spec.InitContainers
}

// GetStatefulsetInitContainers returns the containers of given statefulSet
func GetStatefulsetInitContainers(item interface{}) []v1.Container {
// GetStatefulSetInitContainers returns the init containers of given statefulSet
func GetStatefulSetInitContainers(item interface{}) []v1.Container {
	return item.(appsv1.StatefulSet).Spec.Template.Spec.InitContainers
}
@@ -112,31 +186,46 @@ func GetDeploymentConfigInitContainers(item interface{}) []v1.Container {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
}

// GetRolloutInitContainers returns the init containers of given rollout
func GetRolloutInitContainers(item interface{}) []v1.Container {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
}

// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
	deployment := resource.(appsv1.Deployment)
	_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(&deployment)
	_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), &deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
	daemonSet := resource.(appsv1.DaemonSet)
	_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(&daemonSet)
	_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), &daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateStatefulset performs rolling upgrade on statefulSet
func UpdateStatefulset(clients kube.Clients, namespace string, resource interface{}) error {
// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource interface{}) error {
	statefulSet := resource.(appsv1.StatefulSet)
	_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(&statefulSet)
	_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), &statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
	deploymentConfig := resource.(openshiftv1.DeploymentConfig)
	_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(&deploymentConfig)
	_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), &deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource interface{}) error {
	rollout := resource.(argorolloutv1alpha1.Rollout)
	rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
	logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
	logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
	_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), &rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}
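Alongside the context argument, every Update call above now passes meta_v1.UpdateOptions with FieldManager set to "Reloader", which records the manager in the object's managedFields so the API server can attribute the change. A minimal sketch of that call shape, with a placeholder deployment name ("my-app" is an assumption for illustration):

```go
package main

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func updateWithFieldManager(client kubernetes.Interface, namespace string, d *appsv1.Deployment) error {
	// FieldManager names "Reloader" in the object's managedFields,
	// attributing this write to the controller.
	_, err := client.AppsV1().Deployments(namespace).Update(
		context.TODO(), d, meta_v1.UpdateOptions{FieldManager: "Reloader"})
	return err
}

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster execution
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Fetch first, then write back, as an update requires the current resourceVersion.
	d, err := client.AppsV1().Deployments("default").Get(context.TODO(), "my-app", meta_v1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if err := updateWithFieldManager(client, "default", d); err != nil {
		panic(err)
	}
}
```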
@@ -145,13 +234,13 @@ func GetDeploymentVolumes(item interface{}) []v1.Volume {
	return item.(appsv1.Deployment).Spec.Template.Spec.Volumes
}

// GetDaemonSetVolumes returns the Volumes of given daemonset
// GetDaemonSetVolumes returns the Volumes of given daemonSet
func GetDaemonSetVolumes(item interface{}) []v1.Volume {
	return item.(appsv1.DaemonSet).Spec.Template.Spec.Volumes
}

// GetStatefulsetVolumes returns the Volumes of given statefulSet
func GetStatefulsetVolumes(item interface{}) []v1.Volume {
// GetStatefulSetVolumes returns the Volumes of given statefulSet
func GetStatefulSetVolumes(item interface{}) []v1.Volume {
	return item.(appsv1.StatefulSet).Spec.Template.Spec.Volumes
}

@@ -159,3 +248,8 @@ func GetStatefulsetVolumes(item interface{}) []v1.Volume {
func GetDeploymentConfigVolumes(item interface{}) []v1.Volume {
	return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
}

// GetRolloutVolumes returns the Volumes of given rollout
func GetRolloutVolumes(item interface{}) []v1.Volume {
	return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
}
@@ -8,6 +8,7 @@ import (
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/stakater/Reloader/internal/pkg/controller"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
@@ -23,15 +24,37 @@ func NewReloaderCommand() *cobra.Command {
	}

	// options
	cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps")
	cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets")
	cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
	cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
	cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets")
	cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
	cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
	cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
	cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
	cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
	cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
	return cmd
}

func configureLogging(logFormat string) error {
	switch logFormat {
	case "json":
		logrus.SetFormatter(&logrus.JSONFormatter{})
	default:
		// just let the library use default on empty string.
		if logFormat != "" {
			return fmt.Errorf("unsupported logging formatter: %q", logFormat)
		}
	}
	return nil
}

func startReloader(cmd *cobra.Command, args []string) {
	err := configureLogging(options.LogFormat)
	if err != nil {
		logrus.Warn(err)
	}

	logrus.Info("Starting Reloader")
	currentNamespace := os.Getenv("KUBERNETES_NAMESPACE")
	if len(currentNamespace) == 0 {
@@ -55,12 +78,14 @@ func startReloader(cmd *cobra.Command, args []string) {
		logrus.Fatal(err)
	}

	collectors := metrics.SetupPrometheusEndpoint()

	for k := range kube.ResourceMap {
		if ignoredResourcesList.Contains(k) {
			continue
		}

		c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList)
		c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, collectors)
		if err != nil {
			logrus.Fatalf("%s", err)
		}
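metrics.SetupPrometheusEndpoint() and the collectors handle threaded into each controller are new in this changeset, but their internals are not shown in the diff. A plausible minimal shape of that package, purely as a sketch (the struct fields, function bodies and port are assumptions, not the actual internal/pkg/metrics code; the port follows the 9090 mentioned in the docs):

```go
package metrics

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Collectors bundles the Prometheus collectors shared by all controllers.
// Field names here are assumed for illustration.
type Collectors struct {
	Reloaded *prometheus.CounterVec
}

// NewCollectors builds the counter seen at /metrics as
// reloader_reload_executed_total{success="..."}.
func NewCollectors() Collectors {
	return Collectors{
		Reloaded: prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: "reloader_reload_executed_total",
			Help: "Counter of reloads executed by Reloader.",
		}, []string{"success"}),
	}
}

// SetupPrometheusEndpoint registers the collectors and serves /metrics.
func SetupPrometheusEndpoint() Collectors {
	collectors := NewCollectors()
	prometheus.MustRegister(collectors.Reloaded)
	http.Handle("/metrics", promhttp.Handler())
	go func() { _ = http.ListenAndServe(":9090", nil) }()
	return collectors
}
```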
@@ -6,6 +6,7 @@ import (

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/handler"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	"k8s.io/apimachinery/pkg/fields"
@@ -26,11 +27,12 @@ type Controller struct {
	informer          cache.Controller
	namespace         string
	ignoredNamespaces util.List
	collectors        metrics.Collectors
}

// NewController for initializing a Controller
func NewController(
	client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string) (*Controller, error) {
	client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {

	c := Controller{
		client: client,
@@ -49,16 +51,14 @@ func NewController(
	c.indexer = indexer
	c.informer = informer
	c.queue = queue
	c.collectors = collectors
	return &c, nil
}

// Add function to add a new object to the queue in case of creating a resource
func (c *Controller) Add(obj interface{}) {
	if !c.resourceInIgnoredNamespace(obj) {
		c.queue.Add(handler.ResourceCreatedHandler{
			Resource: obj,
		})
	}
	// Not required, as Reloader should update the resource in the event of any change, not in the event of resource creation.
	// Enqueuing creations caused the issue where Reloader reloads pods when Reloader itself gets restarted, as its queue is filled with all the k8s objects as new resources.
}

func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
@@ -77,6 +77,7 @@ func (c *Controller) Update(old interface{}, new interface{}) {
		c.queue.Add(handler.ResourceUpdatedHandler{
			Resource:    new,
			OldResource: old,
			Collectors:  c.collectors,
		})
	}
}
@@ -144,7 +145,7 @@ func (c *Controller) handleErr(err error, key interface{}) {

	// This controller retries 5 times if something goes wrong. After that, it stops trying.
	if c.queue.NumRequeues(key) < 5 {
		logrus.Errorf("Error syncing events %v: %v", key, err)
		logrus.Errorf("Error syncing events: %v", err)

		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
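For context on where Add and Update land, controllers like this are typically wired through a client-go informer. A generic sketch of that wiring follows; it is not the exact Reloader constructor, and the watched resource, namespace and handler bodies are placeholders:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster execution
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Watch configmaps in one namespace, the way Reloader runs one controller per resource type.
	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "configmaps", "default", fields.Everything())

	indexer, informer := cache.NewIndexerInformer(lw, &v1.ConfigMap{}, 0, cache.ResourceEventHandlerFuncs{
		// Creations are deliberately not handled (see the comment in Add above);
		// only genuine updates should trigger a rolling upgrade.
		UpdateFunc: func(old, new interface{}) {
			fmt.Println("configmap updated:", new.(*v1.ConfigMap).Name)
		},
	}, cache.Indexers{})
	_ = indexer

	stop := make(chan struct{})
	defer close(stop)
	informer.Run(stop) // blocks until stop is closed
}
```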
@@ -5,6 +5,8 @@ import (
	"testing"
	"time"

	"github.com/stakater/Reloader/internal/pkg/metrics"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/handler"
@@ -25,6 +27,11 @@ var (
	data        = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
	newData     = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
	updatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
	collectors  = metrics.NewCollectors()
)

const (
	sleepDuration = 3 * time.Second
)

func TestMain(m *testing.M) {
@@ -33,7 +40,7 @@ func TestMain(m *testing.M) {

	logrus.Infof("Creating controller")
	for k := range kube.ResourceMap {
		c, err := NewController(clients.KubernetesClient, k, namespace, []string{})
		c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, collectors)
		if err != nil {
			logrus.Fatalf("%s", err)
		}
@@ -43,7 +50,7 @@ func TestMain(m *testing.M) {
		defer close(stop)
		go c.Run(1, stop)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	logrus.Infof("Running Testcases")
	retCode := m.Run()
@@ -93,7 +100,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeploymentConfig(t *testing
	if !updated {
		t.Errorf("DeploymentConfig was not updated")
	}
	time.Sleep(5 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeploymentConfig(clients.OpenshiftAppsClient, namespace, configmapName)
@@ -106,7 +113,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeploymentConfig(t *testing
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and create env var upon updating the configmap
@@ -145,7 +152,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -158,7 +165,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and create env var upon updating the configmap
@@ -197,7 +204,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -210,12 +217,15 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and create env var upon creating the configmap
func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {

	// TODO: Fix this test case
	t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case")

	// Creating configmap
	configmapName := configmapNamePrefix + "-create-" + testutil.RandSeq(5)
	_, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
@@ -235,14 +245,14 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}

	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.stakater.com")
	if err != nil {
		t.Errorf("Error while creating the configmap second time %v", err)
	}

	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Verifying deployment update
	logrus.Infof("Verifying env var has been created")
@@ -258,7 +268,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -271,7 +281,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and update env var upon updating the configmap
@@ -317,7 +327,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeployment(t *testing.T) {
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -330,11 +340,11 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Do not Perform rolling upgrade on deployment and create env var upon updating the labels configmap
func TestControllerUpdatingConfigmapLabelsShouldNotCreateorUpdateEnvInDeployment(t *testing.T) {
func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) {
	// Creating configmap
	configmapName := configmapNamePrefix + "-update-" + testutil.RandSeq(5)
	configmapClient, err := testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapName, "www.google.com")
@@ -368,7 +378,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateorUpdateEnvInDeployment
	if updated {
		t.Errorf("Deployment should not be updated by changing label")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapName)
@@ -381,11 +391,15 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateorUpdateEnvInDeployment
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on pod and create a env var upon creating the secret
func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {

	// TODO: Fix this test case
	t.Skip("Skipping TestControllerCreatingConfigmapShouldCreateEnvInDeployment test case")

	// Creating secret
	secretName := secretNamePrefix + "-create-" + testutil.RandSeq(5)
	_, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -404,14 +418,14 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, newData)
	if err != nil {
		t.Errorf("Error in secret creation: %v", err)
	}

	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
@@ -423,7 +437,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
		Annotation: options.SecretUpdateOnChangeAnnotation,
	}
	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
@@ -440,7 +454,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on pod and create a env var upon updating the secret
@@ -490,7 +504,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and update env var upon updating the secret
@@ -546,11 +560,11 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret
func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDeployment(t *testing.T) {
func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) {
	// Creating secret
	secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5)
	secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -595,7 +609,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDeployment(t
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on DaemonSet and create env var upon updating the configmap
@@ -633,7 +647,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
	if !updated {
		t.Errorf("DaemonSet was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting DaemonSet
	err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
@@ -646,7 +660,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on DaemonSet and update env var upon updating the configmap
@@ -670,7 +684,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
		t.Errorf("Configmap was not updated")
	}

	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Updating configmap for second time
	updateErr = testutil.UpdateConfigMap(configmapClient, namespace, configmapName, "", "aurorasolutions.io")
@@ -678,7 +692,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
		t.Errorf("Configmap was not updated")
	}

	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Verifying DaemonSet update
	logrus.Infof("Verifying env var has been updated")
@@ -694,7 +708,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
	if !updated {
		t.Errorf("DaemonSet was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting DaemonSet
	err = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
@@ -707,7 +721,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on pod and create a env var upon updating the secret
@@ -757,7 +771,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on DaemonSet and update env var upon updating the secret
@@ -780,7 +794,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
	if err != nil {
		t.Errorf("Error while updating secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Updating Secret
	err = testutil.UpdateSecret(secretClient, namespace, secretName, "", updatedData)
@@ -814,11 +828,11 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Do not Perform rolling upgrade on pod and create or update a env var upon updating the label in secret
func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDaemonSet(t *testing.T) {
func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *testing.T) {
	// Creating secret
	secretName := secretNamePrefix + "-update-" + testutil.RandSeq(5)
	secretClient, err := testutil.CreateSecret(clients.KubernetesClient, namespace, secretName, data)
@@ -863,7 +877,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateorUpdateEnvInDaemonSet(t *
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on StatefulSet and create env var upon updating the configmap
@@ -901,7 +915,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting StatefulSet
	err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName)
@@ -914,7 +928,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on StatefulSet and update env var upon updating the configmap
@@ -958,7 +972,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSet(t *testing.T) {
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)

	// Deleting StatefulSet
	err = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapName)
@@ -971,7 +985,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on pod and create a env var upon updating the secret
@@ -1021,7 +1035,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on StatefulSet and update env var upon updating the secret
@@ -1077,7 +1091,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) {
	if err != nil {
		logrus.Errorf("Error while deleting the secret %v", err)
	}
	time.Sleep(3 * time.Second)
	time.Sleep(sleepDuration)
}

func TestController_resourceInIgnoredNamespace(t *testing.T) {

@@ -2,13 +2,15 @@ package handler

 import (
 	"github.com/sirupsen/logrus"
+	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	v1 "k8s.io/api/core/v1"
 )

 // ResourceCreatedHandler contains new objects
 type ResourceCreatedHandler struct {
-	Resource interface{}
+	Resource   interface{}
+	Collectors metrics.Collectors
 }

 // Handle processes the newly created resource
@@ -18,7 +20,7 @@ func (r ResourceCreatedHandler) Handle() error {
 	} else {
 		config, _ := r.GetConfig()
 		// process resource based on its type
-		doRollingUpgrade(config)
+		return doRollingUpgrade(config, r.Collectors)
 	}
 	return nil
 }
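The created-resource handler now carries a metrics collector and propagates the rolling-upgrade error instead of swallowing it. A minimal usage sketch, assuming the handler is built from a watch event (the construction site is not part of this diff; `metrics.NewCollectors` appears in the test changes further down):

```go
// Sketch only: wiring the new Collectors field through a create event.
collectors := metrics.NewCollectors()
h := ResourceCreatedHandler{
	Resource:   newConfigMap, // *v1.ConfigMap or *v1.Secret delivered by the watch
	Collectors: collectors,
}
if err := h.Handle(); err != nil {
	logrus.Errorf("rolling upgrade failed: %v", err)
}
```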
@@ -2,6 +2,7 @@ package handler

 import (
 	"github.com/sirupsen/logrus"
+	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	v1 "k8s.io/api/core/v1"
 )
@@ -10,6 +11,7 @@ import (
 type ResourceUpdatedHandler struct {
 	Resource    interface{}
 	OldResource interface{}
+	Collectors  metrics.Collectors
 }

 // Handle processes the updated resource
@@ -20,7 +22,7 @@ func (r ResourceUpdatedHandler) Handle() error {
 		config, oldSHAData := r.GetConfig()
 		if config.SHAValue != oldSHAData {
 			// process resource based on its type
-			doRollingUpgrade(config)
+			return doRollingUpgrade(config, r.Collectors)
 		}
 	}
 	return nil
@@ -31,7 +33,7 @@ func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
 	var oldSHAData string
 	var config util.Config
 	if _, ok := r.Resource.(*v1.ConfigMap); ok {
-		oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap).Data)
+		oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
 		config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
 	} else if _, ok := r.Resource.(*v1.Secret); ok {
 		oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)
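Two things change in the update path: the handler returns the rolling-upgrade error, and the old ConfigMap's SHA is now computed from the whole object rather than only its `.Data` field (what exactly the new `util.GetSHAfromConfigmap` covers is not visible in this compare view). Condensed, the decision reads:

```go
// Condensed from ResourceUpdatedHandler.Handle above: hash old vs new,
// and only a differing SHA triggers a rollout.
config, oldSHAData := r.GetConfig()
if config.SHAValue != oldSHAData {
	return doRollingUpgrade(config, r.Collectors)
}
return nil
```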
@@ -4,9 +4,11 @@ import (
 	"strconv"
 	"strings"

+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/sirupsen/logrus"
 	"github.com/stakater/Reloader/internal/pkg/callbacks"
 	"github.com/stakater/Reloader/internal/pkg/constants"
+	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/options"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	"github.com/stakater/Reloader/pkg/kube"
@@ -17,6 +19,8 @@ import (
 func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
 	return callbacks.RollingUpgradeFuncs{
 		ItemsFunc:          callbacks.GetDeploymentItems,
+		AnnotationsFunc:    callbacks.GetDeploymentAnnotations,
+		PodAnnotationsFunc: callbacks.GetDeploymentPodAnnotations,
 		ContainersFunc:     callbacks.GetDeploymentContainers,
 		InitContainersFunc: callbacks.GetDeploymentInitContainers,
 		UpdateFunc:         callbacks.UpdateDeployment,
@@ -29,6 +33,8 @@ func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
 func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
 	return callbacks.RollingUpgradeFuncs{
 		ItemsFunc:          callbacks.GetDaemonSetItems,
+		AnnotationsFunc:    callbacks.GetDaemonSetAnnotations,
+		PodAnnotationsFunc: callbacks.GetDaemonSetPodAnnotations,
 		ContainersFunc:     callbacks.GetDaemonSetContainers,
 		InitContainersFunc: callbacks.GetDaemonSetInitContainers,
 		UpdateFunc:         callbacks.UpdateDaemonSet,
@@ -41,10 +47,12 @@ func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
 func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
 	return callbacks.RollingUpgradeFuncs{
 		ItemsFunc:          callbacks.GetStatefulSetItems,
-		ContainersFunc:     callbacks.GetStatefulsetContainers,
-		InitContainersFunc: callbacks.GetStatefulsetInitContainers,
-		UpdateFunc:         callbacks.UpdateStatefulset,
-		VolumesFunc:        callbacks.GetStatefulsetVolumes,
+		AnnotationsFunc:    callbacks.GetStatefulSetAnnotations,
+		PodAnnotationsFunc: callbacks.GetStatefulSetPodAnnotations,
+		ContainersFunc:     callbacks.GetStatefulSetContainers,
+		InitContainersFunc: callbacks.GetStatefulSetInitContainers,
+		UpdateFunc:         callbacks.UpdateStatefulSet,
+		VolumesFunc:        callbacks.GetStatefulSetVolumes,
 		ResourceType:       "StatefulSet",
 	}
 }
@@ -53,6 +61,8 @@ func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
 func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
 	return callbacks.RollingUpgradeFuncs{
 		ItemsFunc:          callbacks.GetDeploymentConfigItems,
+		AnnotationsFunc:    callbacks.GetDeploymentConfigAnnotations,
+		PodAnnotationsFunc: callbacks.GetDeploymentConfigPodAnnotations,
 		ContainersFunc:     callbacks.GetDeploymentConfigContainers,
 		InitContainersFunc: callbacks.GetDeploymentConfigInitContainers,
 		UpdateFunc:         callbacks.UpdateDeploymentConfig,
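Each workload kind supplies the same callback table, now extended with `AnnotationsFunc` and `PodAnnotationsFunc`. The sketch below infers the field types from how the callbacks are invoked later in this diff; the authoritative declaration in `internal/pkg/callbacks` is not part of this compare view:

```go
// Inferred shape of callbacks.RollingUpgradeFuncs; the field types are
// assumptions based on the call sites visible in these hunks.
type RollingUpgradeFuncs struct {
	ItemsFunc          func(kube.Clients, string) []interface{} // list workloads in a namespace
	AnnotationsFunc    func(interface{}) map[string]string      // workload-level annotations
	PodAnnotationsFunc func(interface{}) map[string]string      // pod-template annotations
	ContainersFunc     func(interface{}) []v1.Container
	InitContainersFunc func(interface{}) []v1.Container
	UpdateFunc         func(kube.Clients, string, interface{}) error
	VolumesFunc        func(interface{}) []v1.Volume
	ResourceType       string
}
```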
@@ -61,34 +71,78 @@ func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
-func doRollingUpgrade(config util.Config) {
-	clients := kube.GetClients()
-
-	rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs())
-	rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs())
-	rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs())
-
-	if kube.IsOpenshift {
-		rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs())
+// GetArgoRolloutRollingUpgradeFuncs returns all callback funcs for a rollout
+func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
+	return callbacks.RollingUpgradeFuncs{
+		ItemsFunc:          callbacks.GetRolloutItems,
+		AnnotationsFunc:    callbacks.GetRolloutAnnotations,
+		PodAnnotationsFunc: callbacks.GetRolloutPodAnnotations,
+		ContainersFunc:     callbacks.GetRolloutContainers,
+		InitContainersFunc: callbacks.GetRolloutInitContainers,
+		UpdateFunc:         callbacks.UpdateRollout,
+		VolumesFunc:        callbacks.GetRolloutVolumes,
+		ResourceType:       "Rollout",
 	}
 }

-func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) {
+func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
+	clients := kube.GetClients()

-	err := PerformRollingUpgrade(clients, config, upgradeFuncs)
+	err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
+	if err != nil {
+		return err
+	}
+	err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
+	if err != nil {
+		return err
+	}
+	err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
+	if err != nil {
+		return err
+	}
+
+	if kube.IsOpenshift {
+		err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
+		if err != nil {
+			return err
+		}
+	}
+
+	if options.IsArgoRollouts == "true" {
+		err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
+
+	err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
 	if err != nil {
 		logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
 	}
+	return err
 }
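`doRollingUpgrade` now runs the workload kinds in sequence and returns at the first failure, and `rollingUpgrade` passes the collectors down so every attempted update is counted by outcome. The `collectors.Reloaded.With(prometheus.Labels{...})` calls below imply a `CounterVec` keyed by a `success` label; a sketch, with the metric name purely illustrative (the real definition in `internal/pkg/metrics` is not shown in this compare view):

```go
// Assumed shape of metrics.Collectors, inferred from its use in this diff.
type Collectors struct {
	Reloaded *prometheus.CounterVec
}

func NewCollectors() Collectors {
	return Collectors{
		Reloaded: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "reload_executed_total", // illustrative name only
				Help: "Reload attempts, partitioned by success.",
			},
			[]string{"success"},
		),
	}
}
```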
 // PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
-func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) error {
+func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
 	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
-	var err error

 	for _, i := range items {
 		// find correct annotation and update the resource
-		annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
-		reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
+		annotations := upgradeFuncs.AnnotationsFunc(i)
+		annotationValue, found := annotations[config.Annotation]
+		searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
+		reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
+		if !found && !foundAuto && !foundSearchAnn {
+			annotations = upgradeFuncs.PodAnnotationsFunc(i)
+			annotationValue = annotations[config.Annotation]
+			searchAnnotationValue = annotations[options.AutoSearchAnnotation]
+			reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
+		}
 		result := constants.NotUpdated
 		reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
 		if err == nil && reloaderEnabled {
@@ -98,6 +152,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
 		if result != constants.Updated && annotationValue != "" {
 			values := strings.Split(annotationValue, ",")
 			for _, value := range values {
+				value = strings.Trim(value, " ")
 				if value == config.ResourceName {
 					result = updateContainers(upgradeFuncs, i, config, false)
 					if result == constants.Updated {
@@ -107,28 +162,59 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
 			}
 		}

+		if result != constants.Updated && searchAnnotationValue == "true" {
+			matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
+			if matchAnnotationValue == "true" {
+				result = updateContainers(upgradeFuncs, i, config, true)
+			}
+		}
+
 		if result == constants.Updated {
 			err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
+			resourceName := util.ToObjectMeta(i).Name
 			if err != nil {
+				logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
+				collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
+				return err
 			} else {
-				logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
+				logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
+				collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
 			}
 		}
 	}
-	return err
+	return nil
 }
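The second branch above is the new search/match pairing: a workload carrying the search annotation reloads only for a ConfigMap or Secret carrying the match annotation, and the auto and update-on-change paths still take precedence because the branch only runs when `result != constants.Updated`. Illustrative annotation maps, with keys taken from the tests below:

```go
// Workload side: opt in to searching for annotated resources.
workloadAnnotations := map[string]string{"reloader.stakater.com/search": "true"}
// ConfigMap/Secret side: opt in to being matched.
resourceAnnotations := map[string]string{"reloader.stakater.com/match": "true"}
// Both must read "true" for updateContainers(upgradeFuncs, i, config, true) to run.
```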
 func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
 	for i := range volumes {
-		if mountType == constants.ConfigmapEnvVarPostfix && volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
-			return volumes[i].Name
-		} else if mountType == constants.SecretEnvVarPostfix && volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
-			return volumes[i].Name
+		if mountType == constants.ConfigmapEnvVarPostfix {
+			if volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
+				return volumes[i].Name
+			}
+
+			if volumes[i].Projected != nil {
+				for j := range volumes[i].Projected.Sources {
+					if volumes[i].Projected.Sources[j].ConfigMap != nil && volumes[i].Projected.Sources[j].ConfigMap.Name == volumeName {
+						return volumes[i].Name
+					}
+				}
+			}
+		} else if mountType == constants.SecretEnvVarPostfix {
+			if volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
+				return volumes[i].Name
+			}
+
+			if volumes[i].Projected != nil {
+				for j := range volumes[i].Projected.Sources {
+					if volumes[i].Projected.Sources[j].Secret != nil && volumes[i].Projected.Sources[j].Secret.Name == volumeName {
+						return volumes[i].Name
+					}
+				}
+			}
 		}
 	}

 	return ""
 }
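`getVolumeMountName` previously matched only plain ConfigMap and Secret volumes; the rewrite also walks `Projected` volume sources. A minimal example of the shape it now recognises, built with `k8s.io/api/core/v1` types and illustrative names:

```go
// A projected volume bundling a ConfigMap and a Secret.
vol := v1.Volume{
	Name: "bundle",
	VolumeSource: v1.VolumeSource{
		Projected: &v1.ProjectedVolumeSource{
			Sources: []v1.VolumeProjection{
				{ConfigMap: &v1.ConfigMapProjection{
					LocalObjectReference: v1.LocalObjectReference{Name: "app-config"},
				}},
				{Secret: &v1.SecretProjection{
					LocalObjectReference: v1.LocalObjectReference{Name: "app-secret"},
				}},
			},
		},
	},
}
// getVolumeMountName([]v1.Volume{vol}, constants.ConfigmapEnvVarPostfix, "app-config")
// now returns "bundle"; before this change it returned "".
```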
@@ -212,7 +298,7 @@ func getContainerToUpdate(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter

 func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
 	var result constants.Result
-	envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
+	envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
 	container := getContainerToUpdate(upgradeFuncs, item, config, autoReload)

 	if container == nil {
@@ -220,12 +306,12 @@ func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface
 	}

 	//update if env var exists
-	result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envar, config.SHAValue)
+	result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue)

 	// if no existing env var exists lets create one
 	if result == constants.NoEnvVarFound {
 		e := v1.EnvVar{
-			Name:  envar,
+			Name:  envVar,
 			Value: config.SHAValue,
 		}
 		container.Env = append(container.Env, e)
@@ -234,11 +320,11 @@ func updateContainers(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface
 	return result
 }

-func updateEnvVar(containers []v1.Container, envar string, shaData string) constants.Result {
+func updateEnvVar(containers []v1.Container, envVar string, shaData string) constants.Result {
 	for i := range containers {
 		envs := containers[i].Env
 		for j := range envs {
-			if envs[j].Name == envar {
+			if envs[j].Name == envVar {
 				if envs[j].Value != shaData {
 					envs[j].Value = shaData
 					return constants.Updated
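The rename from `envar` to `envVar` is cosmetic, but the line itself documents how Reloader tracks state: the SHA is stored in a container env var derived from the resource name and type. A worked example, assuming `constants.EnvVarPrefix` is `"STAKATER_"` and `util.ConvertToEnvVarName` upper-cases the name with underscores (both assumptions; only the concatenation appears in this hunk):

```go
// Hypothetical values for a ConfigMap named "app-config":
//   envVar = EnvVarPrefix + ConvertToEnvVarName("app-config") + "_" + "CONFIGMAP"
//          = "STAKATER_APP_CONFIG_CONFIGMAP"
// Its value is set to config.SHAValue, so a changed SHA edits the pod
// template and triggers the workload's own rolling update.
```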
@@ -1,32 +1,45 @@
 package handler

 import (
+	"context"
 	"fmt"
 	"os"
 	"testing"
 	"time"

+	"github.com/prometheus/client_golang/prometheus"
+	promtestutil "github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/sirupsen/logrus"
 	"github.com/stakater/Reloader/internal/pkg/constants"
+	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/options"
 	"github.com/stakater/Reloader/internal/pkg/testutil"
 	"github.com/stakater/Reloader/internal/pkg/util"
 	"github.com/stakater/Reloader/pkg/kube"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	testclient "k8s.io/client-go/kubernetes/fake"
 )

 var (
-	clients                    = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()}
-	namespace                  = "test-handler-" + testutil.RandSeq(5)
-	configmapName              = "testconfigmap-handler-" + testutil.RandSeq(5)
-	secretName                 = "testsecret-handler-" + testutil.RandSeq(5)
-	configmapWithInitContainer = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
-	secretWithInitContainer    = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
-	configmapWithInitEnv       = "configmapWithInitEnv-" + testutil.RandSeq(5)
-	secretWithInitEnv          = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
-	configmapWithEnvName       = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
-	configmapWithEnvFromName   = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
-	secretWithEnvName          = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
-	secretWithEnvFromName      = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
+	clients                             = kube.Clients{KubernetesClient: testclient.NewSimpleClientset()}
+	namespace                           = "test-handler-" + testutil.RandSeq(5)
+	configmapName                       = "testconfigmap-handler-" + testutil.RandSeq(5)
+	secretName                          = "testsecret-handler-" + testutil.RandSeq(5)
+	projectedConfigMapName              = "testprojectedconfigmap-handler-" + testutil.RandSeq(5)
+	projectedSecretName                 = "testprojectedsecret-handler-" + testutil.RandSeq(5)
+	configmapWithInitContainer          = "testconfigmapInitContainerhandler-" + testutil.RandSeq(5)
+	secretWithInitContainer             = "testsecretWithInitContainer-handler-" + testutil.RandSeq(5)
+	projectedConfigMapWithInitContainer = "testProjectedConfigMapWithInitContainer-handler" + testutil.RandSeq(5)
+	projectedSecretWithInitContainer    = "testProjectedSecretWithInitContainer-handler" + testutil.RandSeq(5)
+	configmapWithInitEnv                = "configmapWithInitEnv-" + testutil.RandSeq(5)
+	secretWithInitEnv                   = "secretWithInitEnv-handler-" + testutil.RandSeq(5)
+	configmapWithEnvName                = "testconfigmapWithEnv-handler-" + testutil.RandSeq(5)
+	configmapWithEnvFromName            = "testconfigmapWithEnvFrom-handler-" + testutil.RandSeq(5)
+	secretWithEnvName                   = "testsecretWithEnv-handler-" + testutil.RandSeq(5)
+	secretWithEnvFromName               = "testsecretWithEnvFrom-handler-" + testutil.RandSeq(5)
+	configmapWithPodAnnotations         = "testconfigmapPodAnnotations-handler-" + testutil.RandSeq(5)
+	configmapWithBothAnnotations        = "testconfigmapBothAnnotations-handler-" + testutil.RandSeq(5)
+	configmapAnnotated                  = "testconfigmapAnnotated-handler-" + testutil.RandSeq(5)
 )

 func TestMain(m *testing.M) {
@@ -60,6 +73,30 @@ func setup() {
 		logrus.Errorf("Error in secret creation: %v", err)
 	}

+	// Creating configmap will be used in projected volume
+	_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, projectedConfigMapName, "www.google.com")
+	if err != nil {
+		logrus.Errorf("Error in configmap creation: %v", err)
+	}
+
+	// Creating secret will be used in projected volume
+	_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, projectedSecretName, data)
+	if err != nil {
+		logrus.Errorf("Error in secret creation: %v", err)
+	}
+
+	// Creating configmap will be used in projected volume in init containers
+	_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer, "www.google.com")
+	if err != nil {
+		logrus.Errorf("Error in configmap creation: %v", err)
+	}
+
+	// Creating secret will be used in projected volume in init containers
+	_, err = testutil.CreateSecret(clients.KubernetesClient, namespace, projectedSecretWithInitContainer, data)
+	if err != nil {
+		logrus.Errorf("Error in secret creation: %v", err)
+	}
+
 	_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName, "www.google.com")
 	if err != nil {
 		logrus.Errorf("Error in configmap creation: %v", err)
@@ -104,6 +141,11 @@ func setup() {
 		logrus.Errorf("Error in secret creation: %v", err)
 	}

+	_, err = testutil.CreateConfigMap(clients.KubernetesClient, namespace, configmapWithPodAnnotations, "www.google.com")
+	if err != nil {
+		logrus.Errorf("Error in configmap creation: %v", err)
+	}
+
 	// Creating Deployment with configmap
 	_, err = testutil.CreateDeployment(clients.KubernetesClient, configmapName, namespace, true)
 	if err != nil {
@@ -116,6 +158,30 @@ func setup() {
 		logrus.Errorf("Error in Deployment with configmap creation: %v", err)
 	}

+	// Creating Deployment with configmap in projected volume
+	_, err = testutil.CreateDeployment(clients.KubernetesClient, projectedConfigMapName, namespace, true)
+	if err != nil {
+		logrus.Errorf("Error in Deployment with configmap creation: %v", err)
+	}
+
+	// Creating Deployment with configmap in projected volume mounted in init container
+	_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, projectedConfigMapWithInitContainer, namespace, true)
+	if err != nil {
+		logrus.Errorf("Error in Deployment with configmap creation: %v", err)
+	}
+
+	// Creating Deployment with secret in projected volume
+	_, err = testutil.CreateDeployment(clients.KubernetesClient, projectedSecretName, namespace, true)
+	if err != nil {
+		logrus.Errorf("Error in Deployment with secret creation: %v", err)
+	}
+
+	// Creating Deployment with secret in projected volume mounted in init container
+	_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, projectedSecretWithInitContainer, namespace, true)
+	if err != nil {
+		logrus.Errorf("Error in Deployment with secret creation: %v", err)
+	}
+
 	// Creating Deployment with secret mounted in init container
 	_, err = testutil.CreateDeploymentWithInitContainer(clients.KubernetesClient, secretWithInitContainer, namespace, true)
 	if err != nil {
@@ -164,6 +230,17 @@ func setup() {
 		logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err)
 	}

+	// Creating Deployment with envFrom source as secret
+	_, err = testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(
+		clients.KubernetesClient,
+		configmapAnnotated,
+		namespace,
+		map[string]string{"reloader.stakater.com/search": "true"},
+	)
+	if err != nil {
+		logrus.Errorf("Error in Deployment with secret configmap as envFrom source creation: %v", err)
+	}
+
 	// Creating DaemonSet with configmap
 	_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapName, namespace, true)
 	if err != nil {
@@ -176,6 +253,18 @@ func setup() {
 		logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
 	}

+	// Creating DaemonSet with configmap in projected volume
+	_, err = testutil.CreateDaemonSet(clients.KubernetesClient, projectedConfigMapName, namespace, true)
+	if err != nil {
+		logrus.Errorf("Error in DaemonSet with configmap creation: %v", err)
+	}
+
+	// Creating DaemonSet with secret in projected volume
+	_, err = testutil.CreateDaemonSet(clients.KubernetesClient, projectedSecretName, namespace, true)
+	if err != nil {
+		logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
+	}
+
 	// Creating DaemonSet with env var source as configmap
 	_, err = testutil.CreateDaemonSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
 	if err != nil {
@@ -200,6 +289,18 @@ func setup() {
 		logrus.Errorf("Error in StatefulSet with secret creation: %v", err)
 	}

+	// Creating StatefulSet with configmap in projected volume
+	_, err = testutil.CreateStatefulSet(clients.KubernetesClient, projectedConfigMapName, namespace, true)
+	if err != nil {
+		logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
+	}
+
+	// Creating StatefulSet with secret in projected volume
+	_, err = testutil.CreateStatefulSet(clients.KubernetesClient, projectedSecretName, namespace, true)
+	if err != nil {
+		logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
+	}
+
 	// Creating StatefulSet with env var source as configmap
 	_, err = testutil.CreateStatefulSet(clients.KubernetesClient, configmapWithEnvName, namespace, false)
 	if err != nil {
@@ -212,6 +313,17 @@ func setup() {
 		logrus.Errorf("Error in StatefulSet with secret configmap as env var source creation: %v", err)
 	}

+	// Creating Deployment with pod annotations
+	_, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, configmapWithPodAnnotations, namespace, false)
+	if err != nil {
+		logrus.Errorf("Error in Deployment with pod annotations: %v", err)
+	}
+
+	// Creating Deployment with both annotations
+	_, err = testutil.CreateDeploymentWithPodAnnotations(clients.KubernetesClient, configmapWithBothAnnotations, namespace, true)
+	if err != nil {
+		logrus.Errorf("Error in Deployment with both annotations: %v", err)
+	}
 }

 func teardown() {
@@ -227,6 +339,30 @@ func teardown() {
 		logrus.Errorf("Error while deleting deployment with secret %v", deploymentError)
 	}

+	// Deleting Deployment with configmap in projected volume
+	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedConfigMapName)
+	if deploymentError != nil {
+		logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
+	}
+
+	// Deleting Deployment with configmap in projected volume mounted in init container
+	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer)
+	if deploymentError != nil {
+		logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
+	}
+
+	// Deleting Deployment with secret in projected volume
+	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedSecretName)
+	if deploymentError != nil {
+		logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
+	}
+
+	// Deleting Deployment with secret in projected volume mounted in init container
+	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, projectedSecretWithInitContainer)
+	if deploymentError != nil {
+		logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
+	}
+
 	// Deleting Deployment with configmap as env var source
 	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithEnvName)
 	if deploymentError != nil {
@@ -275,6 +411,24 @@ func teardown() {
 		logrus.Errorf("Error while deleting deployment with secret as envFrom source %v", deploymentError)
 	}

+	// Deleting Deployment with pod annotations
+	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithPodAnnotations)
+	if deploymentError != nil {
+		logrus.Errorf("Error while deleting deployment with pod annotations %v", deploymentError)
+	}
+
+	// Deleting Deployment with both annotations
+	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapWithBothAnnotations)
+	if deploymentError != nil {
+		logrus.Errorf("Error while deleting deployment with both annotations %v", deploymentError)
+	}
+
+	// Deleting Deployment with search annotation
+	deploymentError = testutil.DeleteDeployment(clients.KubernetesClient, namespace, configmapAnnotated)
+	if deploymentError != nil {
+		logrus.Errorf("Error while deleting deployment with search annotation %v", deploymentError)
+	}
+
 	// Deleting DaemonSet with configmap
 	daemonSetError := testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapName)
 	if daemonSetError != nil {
@@ -287,6 +441,18 @@ func teardown() {
 		logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
 	}

+	// Deleting DaemonSet with configmap in projected volume
+	daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, projectedConfigMapName)
+	if daemonSetError != nil {
+		logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError)
+	}
+
+	// Deleting Deployment with secret in projected volume
+	daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, projectedSecretName)
+	if daemonSetError != nil {
+		logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
+	}
+
 	// Deleting Deployment with configmap as env var source
 	daemonSetError = testutil.DeleteDaemonSet(clients.KubernetesClient, namespace, configmapWithEnvName)
 	if daemonSetError != nil {
@@ -311,6 +477,18 @@ func teardown() {
 		logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
 	}

+	// Deleting StatefulSet with configmap in projected volume
+	statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, projectedConfigMapName)
+	if statefulSetError != nil {
+		logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError)
+	}
+
+	// Deleting Deployment with secret in projected volume
+	statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, projectedSecretName)
+	if statefulSetError != nil {
+		logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
+	}
+
 	// Deleting StatefulSet with configmap as env var source
 	statefulSetError = testutil.DeleteStatefulSet(clients.KubernetesClient, namespace, configmapWithEnvName)
 	if statefulSetError != nil {
@@ -335,6 +513,30 @@ func teardown() {
 		logrus.Errorf("Error while deleting the secret %v", err)
 	}

+	// Deleting configmap used in projected volume
+	err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, projectedConfigMapName)
+	if err != nil {
+		logrus.Errorf("Error while deleting the configmap %v", err)
+	}
+
+	// Deleting Secret used in projected volume
+	err = testutil.DeleteSecret(clients.KubernetesClient, namespace, projectedSecretName)
+	if err != nil {
+		logrus.Errorf("Error while deleting the secret %v", err)
+	}
+
+	// Deleting configmap used in projected volume in init containers
+	err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, projectedConfigMapWithInitContainer)
+	if err != nil {
+		logrus.Errorf("Error while deleting the configmap %v", err)
+	}
+
+	// Deleting Configmap used projected volume in init containers
+	err = testutil.DeleteSecret(clients.KubernetesClient, namespace, projectedSecretWithInitContainer)
+	if err != nil {
+		logrus.Errorf("Error while deleting the secret %v", err)
+	}
+
 	// Deleting Configmap used as env var source
 	err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithEnvName)
 	if err != nil {
@@ -383,6 +585,11 @@ func teardown() {
 		logrus.Errorf("Error while deleting the secret used as env var source in init container %v", err)
 	}

+	err = testutil.DeleteConfigMap(clients.KubernetesClient, namespace, configmapWithPodAnnotations)
+	if err != nil {
+		logrus.Errorf("Error while deleting the configmap used with pod annotations: %v", err)
+	}
+
 	// Deleting namespace
 	testutil.DeleteNamespace(namespace, clients.KubernetesClient)
@@ -398,12 +605,20 @@ func getConfigWithAnnotations(resourceType string, name string, shaData string,
 	}
 }

+func getCollectors() metrics.Collectors {
+	return metrics.NewCollectors()
+}
+
+var labelSucceeded = prometheus.Labels{"success": "true"}
+var labelFailed = prometheus.Labels{"success": "false"}
+
 func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
 	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
 	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
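From here on, every test pairs the functional assertion with a metrics one. `promtestutil.ToFloat64` reads the current value of a single collector, and each test builds a fresh `Collectors` through `getCollectors()`, so the expected count after one reload is exactly 1 (or 0 for the negative tests):

```go
collectors := getCollectors() // fresh counter per test; counts start at zero
// ... PerformRollingUpgrade(clients, config, deploymentFuncs, collectors) ...
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
	t.Errorf("Counter was not increased")
}
```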
@@ -414,14 +629,132 @@ func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
 	if !updated {
 		t.Errorf("Deployment was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }

+func TestRollingUpgradeForDeploymentWithConfigmapInProjectedVolume(t *testing.T) {
+	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.stakater.com")
+	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
+	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()
+
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
+	if err != nil {
+		t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
+	}
+
+	logrus.Infof("Verifying deployment update")
+	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
+	if !updated {
+		t.Errorf("Deployment was not updated")
+	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
+}
+
+// Un-used function
+// func createConfigMap(clients *kube.Clients, namespace, name string, annotations map[string]string) (*core_v1.ConfigMap, error) {
+// 	configmapObj := testutil.GetConfigmap(namespace, name, "www.google.com")
+// 	configmapObj.Annotations = annotations
+// 	return clients.KubernetesClient.CoreV1().ConfigMaps(namespace).Create(configmapObj)
+// }
+
+func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotation(t *testing.T) {
+	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
+	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")
+	config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "true"}
+	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()
+
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
+	if err != nil {
+		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
+	}
+
+	logrus.Infof("Verifying deployment update")
+	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
+	if !updated {
+		t.Errorf("Deployment was not updated")
+	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
+}
+
+func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNoTriggers(t *testing.T) {
+	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
+	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")
+	config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"}
+	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()
+
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
+	if err != nil {
+		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
+	}
+
+	logrus.Infof("Verifying deployment update")
+	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
+	time.Sleep(5 * time.Second)
+	if updated {
+		t.Errorf("Deployment was updated unexpectedly")
+	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 {
+		t.Errorf("Counter was increased unexpectedly")
+	}
+}
+
+func TestRollingUpgradeForDeploymentWithConfigmapViaSearchAnnotationNotMapped(t *testing.T) {
+	deployment, err := testutil.CreateDeploymentWithEnvVarSourceAndAnnotations(
+		clients.KubernetesClient,
+		configmapAnnotated+"-different",
+		namespace,
+		map[string]string{"reloader.stakater.com/search": "true"},
+	)
+	if err != nil {
+		t.Errorf("Failed to create deployment with search annotation.")
+	}
+	defer func() {
+		_ = clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(context.TODO(), deployment.Name, v1.DeleteOptions{})
+	}()
+	// defer clients.KubernetesClient.AppsV1().Deployments(namespace).Delete(deployment.Name, &v1.DeleteOptions{})
+
+	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapAnnotated, "www.stakater.com")
+	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapAnnotated, shaData, "")
+	config.ResourceAnnotations = map[string]string{"reloader.stakater.com/match": "false"}
+	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()
+
+	err = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
+	if err != nil {
+		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
+	}
+
+	logrus.Infof("Verifying deployment update")
+	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
+	if updated {
+		t.Errorf("Deployment was updated unexpectedly")
+	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) > 0 {
+		t.Errorf("Counter was increased unexpectedly")
+	}
+}
+
 func TestRollingUpgradeForDeploymentWithConfigmapInInitContainer(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithInitContainer, "www.stakater.com")
 	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation)
 	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for Deployment with Configmap")
@@ -432,14 +765,42 @@ func TestRollingUpgradeForDeploymentWithConfigmapInInitContainer(t *testing.T) {
 	if !updated {
 		t.Errorf("Deployment was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }
+
+func TestRollingUpgradeForDeploymentWithConfigmapInProjectVolumeInInitContainer(t *testing.T) {
+	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapWithInitContainer, "www.stakater.com")
+	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapWithInitContainer, shaData, options.ConfigmapUpdateOnChangeAnnotation)
+	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()
+
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
+	time.Sleep(5 * time.Second)
+	if err != nil {
+		t.Errorf("Rolling upgrade failed for Deployment with Configmap in projected volume")
+	}
+
+	logrus.Infof("Verifying deployment update")
+	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
+	if !updated {
+		t.Errorf("Deployment was not updated")
+	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
+}

 func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVar(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.stakater.com")
 	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
 	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -450,14 +811,19 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVar(t *testing.T) {
 	if !updated {
 		t.Errorf("Deployment was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }

 func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainer(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithInitEnv, "www.stakater.com")
 	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithInitEnv, shaData, options.ReloaderAutoAnnotation)
 	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -468,14 +834,19 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarInInitContainer(t *test
 	if !updated {
 		t.Errorf("Deployment was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }

 func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFrom(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvFromName, "www.stakater.com")
 	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvFromName, shaData, options.ReloaderAutoAnnotation)
 	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for Deployment with Configmap used as env var")
@@ -486,14 +857,19 @@ func TestRollingUpgradeForDeploymentWithConfigmapAsEnvVarFrom(t *testing.T) {
 	if !updated {
 		t.Errorf("Deployment was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }

 func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
 	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
 	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -504,14 +880,42 @@ func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
 	if !updated {
 		t.Errorf("Deployment was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }
+
+func TestRollingUpgradeForDeploymentWithSecretInProjectedVolume(t *testing.T) {
+	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
+	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
+	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()
+
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
+	time.Sleep(5 * time.Second)
+	if err != nil {
+		t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
+	}
+
+	logrus.Infof("Verifying deployment update")
+	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
+	if !updated {
+		t.Errorf("Deployment was not updated")
+	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
+}

 func TestRollingUpgradeForDeploymentWithSecretinInitContainer(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
 	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation)
 	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -522,14 +926,42 @@ func TestRollingUpgradeForDeploymentWithSecretinInitContainer(t *testing.T) {
 	if !updated {
 		t.Errorf("Deployment was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }
+
+func TestRollingUpgradeForDeploymentWithSecretInProjectedVolumeinInitContainer(t *testing.T) {
+	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretWithInitContainer, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
+	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretWithInitContainer, shaData, options.SecretUpdateOnChangeAnnotation)
+	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()
+
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
+	time.Sleep(5 * time.Second)
+	if err != nil {
+		t.Errorf("Rolling upgrade failed for Deployment with Secret in projected volume")
+	}
+
+	logrus.Infof("Verifying deployment update")
+	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, deploymentFuncs)
+	if !updated {
+		t.Errorf("Deployment was not updated")
+	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
+}

 func TestRollingUpgradeForDeploymentWithSecretAsEnvVar(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithEnvName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
 	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvName, shaData, options.ReloaderAutoAnnotation)
 	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -540,14 +972,19 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVar(t *testing.T) {
 	if !updated {
 		t.Errorf("Deployment was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }

 func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFrom(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithEnvFromName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
 	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithEnvFromName, shaData, options.ReloaderAutoAnnotation)
 	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -558,14 +995,19 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarFrom(t *testing.T) {
 	if !updated {
 		t.Errorf("Deployment was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }

 func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainer(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretWithInitEnv, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
 	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretWithInitEnv, shaData, options.ReloaderAutoAnnotation)
 	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, deploymentFuncs)
+	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for Deployment with Secret")
@@ -576,14 +1018,19 @@ func TestRollingUpgradeForDeploymentWithSecretAsEnvVarInInitContainer(t *testing
 	if !updated {
 		t.Errorf("Deployment was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }

 func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.facebook.com")
 	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
 	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
+	err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
@@ -594,14 +1041,42 @@ func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
 	if !updated {
 		t.Errorf("DaemonSet was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }
+
+func TestRollingUpgradeForDaemonSetWithConfigmapInProjectedVolume(t *testing.T) {
+	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.facebook.com")
+	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
+	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
+	collectors := getCollectors()
+
+	err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
+	time.Sleep(5 * time.Second)
+	if err != nil {
+		t.Errorf("Rolling upgrade failed for DaemonSet with configmap in projected volume")
+	}
+
+	logrus.Infof("Verifying daemonSet update")
+	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
+	if !updated {
+		t.Errorf("DaemonSet was not updated")
+	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
+}

 func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVar(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithEnvName, "www.facebook.com")
 	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithEnvName, shaData, options.ReloaderAutoAnnotation)
 	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
+	err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for DaemonSet with configmap used as env var")
@@ -612,14 +1087,19 @@ func TestRollingUpgradeForDaemonSetWithConfigmapAsEnvVar(t *testing.T) {
 	if !updated {
 		t.Errorf("DaemonSet was not updated")
 	}
+
+	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
+		t.Errorf("Counter was not increased")
+	}
 }

 func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LmZhY2Vib29rLmNvbQ==")
 	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
 	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
+	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, daemonSetFuncs)
+	err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
 	time.Sleep(5 * time.Second)
 	if err != nil {
 		t.Errorf("Rolling upgrade failed for DaemonSet with secret")
@@ -630,14 +1110,42 @@ func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
|
||||
if !updated {
|
||||
t.Errorf("DaemonSet was not updated")
|
||||
}
|
||||
|
||||
if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
|
||||
t.Errorf("Counter was not increased")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForDaemonSetWithSecretInProjectedVolume(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "d3d3LmZhY2Vib29rLmNvbQ==")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
	daemonSetFuncs := GetDaemonSetRollingUpgradeFuncs()
	collectors := getCollectors()

	err := PerformRollingUpgrade(clients, config, daemonSetFuncs, collectors)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for DaemonSet with secret in projected volume")
	}

	logrus.Infof("Verifying daemonSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
	if !updated {
		t.Errorf("DaemonSet was not updated")
	}

	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}

func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.twitter.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, statefulSetFuncs)
+	err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
@@ -648,14 +1156,42 @@ func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}

	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}

func TestRollingUpgradeForStatefulSetWithConfigmapInProjectedVolume(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, projectedConfigMapName, "www.twitter.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, projectedConfigMapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
	collectors := getCollectors()

	err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for StatefulSet with configmap in projected volume")
	}

	logrus.Infof("Verifying statefulSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}

	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}

func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LnR3aXR0ZXIuY29t")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, secretName, shaData, options.SecretUpdateOnChangeAnnotation)
	statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
	collectors := getCollectors()

-	err := PerformRollingUpgrade(clients, config, statefulSetFuncs)
+	err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for StatefulSet with secret")
@@ -666,4 +1202,94 @@ func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}

	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}

func TestRollingUpgradeForStatefulSetWithSecretInProjectedVolume(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, projectedSecretName, "d3d3LnR3aXR0ZXIuY29t")
	config := getConfigWithAnnotations(constants.SecretEnvVarPostfix, projectedSecretName, shaData, options.SecretUpdateOnChangeAnnotation)
	statefulSetFuncs := GetStatefulSetRollingUpgradeFuncs()
	collectors := getCollectors()

	err := PerformRollingUpgrade(clients, config, statefulSetFuncs, collectors)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for StatefulSet with secret in projected volume")
	}

	logrus.Infof("Verifying statefulSet update")
	updated := testutil.VerifyResourceUpdate(clients, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
	if !updated {
		t.Errorf("StatefulSet was not updated")
	}

	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}

func TestRollingUpgradeForDeploymentWithPodAnnotations(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapWithPodAnnotations, "www.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapWithPodAnnotations, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	collectors := getCollectors()

	err := PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)
	time.Sleep(5 * time.Second)
	if err != nil {
		t.Errorf("Rolling upgrade failed for Deployment with pod annotations")
	}

	logrus.Infof("Verifying deployment update")
	envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + constants.ConfigmapEnvVarPostfix
	items := deploymentFuncs.ItemsFunc(clients, config.Namespace)
	var foundPod, foundBoth bool
	for _, i := range items {
		name := util.ToObjectMeta(i).Name
		if name == configmapWithPodAnnotations {
			containers := deploymentFuncs.ContainersFunc(i)
			updated := testutil.GetResourceSHA(containers, envName)
			if updated != config.SHAValue {
				t.Errorf("Deployment was not updated")
			}
			foundPod = true
		}
		if name == configmapWithBothAnnotations {
			containers := deploymentFuncs.ContainersFunc(i)
			updated := testutil.GetResourceSHA(containers, envName)
			if updated == config.SHAValue {
				t.Errorf("Deployment was updated")
			}
			foundBoth = true
		}
	}
	if !foundPod {
		t.Errorf("Deployment with pod annotations was not found")
	}
	if !foundBoth {
		t.Errorf("Deployment with both annotations was not found")
	}

	if promtestutil.ToFloat64(collectors.Reloaded.With(labelSucceeded)) != 1 {
		t.Errorf("Counter was not increased")
	}
}

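Aside: the envName construction above follows Reloader's env-var naming scheme, a fixed prefix, the resource name upper-snake-cased, then the resource-type postfix. A standalone sketch of that scheme; the literal prefix "STAKATER_" and postfix "CONFIGMAP" are assumptions based on the constant names, and convertToEnvVarName is a simplified stand-in for util.ConvertToEnvVarName:

package main

import (
	"fmt"
	"strings"
)

// convertToEnvVarName is a simplified stand-in for util.ConvertToEnvVarName:
// upper-case the name and replace separators with underscores.
func convertToEnvVarName(text string) string {
	return strings.ToUpper(strings.ReplaceAll(text, "-", "_"))
}

func main() {
	// Assumed values: constants.EnvVarPrefix = "STAKATER_",
	// constants.ConfigmapEnvVarPostfix = "CONFIGMAP".
	envName := "STAKATER_" + convertToEnvVarName("my-configmap") + "_" + "CONFIGMAP"
	fmt.Println(envName) // STAKATER_MY_CONFIGMAP_CONFIGMAP
}
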
func TestFailedRollingUpgrade(t *testing.T) {
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "fail.stakater.com")
	config := getConfigWithAnnotations(constants.ConfigmapEnvVarPostfix, configmapName, shaData, options.ConfigmapUpdateOnChangeAnnotation)
	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()
	deploymentFuncs.UpdateFunc = func(_ kube.Clients, _ string, _ interface{}) error {
		return fmt.Errorf("error")
	}
	collectors := getCollectors()

	_ = PerformRollingUpgrade(clients, config, deploymentFuncs, collectors)

	if promtestutil.ToFloat64(collectors.Reloaded.With(labelFailed)) != 1 {
		t.Errorf("Counter was not increased")
	}
}

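These assertions read the counter directly with client_golang's testutil.ToFloat64, which collects a single metric and returns its current value. A minimal standalone illustration of that pattern (the demo metric name is made up):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	// A throwaway counter, incremented once; ToFloat64 reads it back.
	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "demo"})
	c.Inc()
	fmt.Println(testutil.ToFloat64(c)) // 1
}
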
internal/pkg/metrics/prometheus.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/sirupsen/logrus"
	"net/http"
)

type Collectors struct {
	Reloaded *prometheus.CounterVec
}

func NewCollectors() Collectors {
	reloaded := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "reloader",
			Name:      "reload_executed_total",
			Help:      "Counter of reloads executed by Reloader.",
		},
		[]string{"success"},
	)

	// set 0 as default value
	reloaded.With(prometheus.Labels{"success": "true"}).Add(0)
	reloaded.With(prometheus.Labels{"success": "false"}).Add(0)

	return Collectors{
		Reloaded: reloaded,
	}
}

func SetupPrometheusEndpoint() Collectors {
	collectors := NewCollectors()
	prometheus.MustRegister(collectors.Reloaded)

	go func() {
		http.Handle("/metrics", promhttp.Handler())
		logrus.Fatal(http.ListenAndServe(":9090", nil))
	}()

	return collectors
}

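The tests earlier in this diff pass these Collectors into PerformRollingUpgrade and then assert on the success/failure labels (labelSucceeded/labelFailed, presumably prometheus.Labels with "success" set to "true" or "false"). A minimal sketch of how a caller might record an outcome on the Reloaded counter; the recordReload helper is illustrative, not part of this changeset:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

type Collectors struct {
	Reloaded *prometheus.CounterVec
}

// recordReload bumps the counter with success="true" or "false",
// mirroring the labelSucceeded/labelFailed checks in the tests above.
func recordReload(c Collectors, err error) {
	label := "true"
	if err != nil {
		label = "false"
	}
	c.Reloaded.With(prometheus.Labels{"success": label}).Inc()
}

func main() {
	c := Collectors{Reloaded: prometheus.NewCounterVec(
		prometheus.CounterOpts{Namespace: "reloader", Name: "reload_executed_total", Help: "demo"},
		[]string{"success"},
	)}
	recordReload(c, nil)
	recordReload(c, errors.New("boom"))
	fmt.Println(testutil.ToFloat64(c.Reloaded.With(prometheus.Labels{"success": "true"})))  // 1
	fmt.Println(testutil.ToFloat64(c.Reloaded.With(prometheus.Labels{"success": "false"}))) // 1
}
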
@@ -1,10 +1,22 @@
package options

var (
-	// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in configmaps
+	// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in
+	// configmaps specified by name
	ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"
-	// SecretUpdateOnChangeAnnotation is an annotation to detect changes in secrets
+	// SecretUpdateOnChangeAnnotation is an annotation to detect changes in
+	// secrets specified by name
	SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
	// ReloaderAutoAnnotation is an annotation to detect changes in secrets
	ReloaderAutoAnnotation = "reloader.stakater.com/auto"
+	// AutoSearchAnnotation is an annotation to detect changes in
+	// configmaps or triggers with the SearchMatchAnnotation
+	AutoSearchAnnotation = "reloader.stakater.com/search"
+	// SearchMatchAnnotation is an annotation to tag secrets to be found with
+	// AutoSearchAnnotation
+	SearchMatchAnnotation = "reloader.stakater.com/match"
+	// LogFormat is the log format to use (json, or empty string for default)
+	LogFormat = ""
+	// Adds support for argo rollouts
+	IsArgoRollouts = "false"
)

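The new search/match annotations work as a pair: a workload annotated with AutoSearchAnnotation only reloads for configmaps or secrets that carry SearchMatchAnnotation. An illustrative sketch of that pairing (object names are made up):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The workload opts in to searching for tagged resources.
	d := appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{
		Name:        "demo-app",
		Annotations: map[string]string{"reloader.stakater.com/search": "true"},
	}}
	// Only resources tagged with the match annotation trigger a reload.
	s := corev1.Secret{ObjectMeta: metav1.ObjectMeta{
		Name:        "demo-secret",
		Annotations: map[string]string{"reloader.stakater.com/match": "true"},
	}}
	fmt.Println(d.Annotations, s.Annotations)
}
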
@@ -1,6 +1,7 @@
package testutil

import (
+	"context"
	"math/rand"
	"sort"
	"strconv"
@@ -33,7 +34,7 @@ var (

// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
-	_, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
+	_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
	if err != nil {
		logrus.Fatalf("Failed to create namespace for testing %v", err)
	} else {
@@ -43,7 +44,7 @@ func CreateNamespace(namespace string, client kubernetes.Interface) {

// DeleteNamespace deletes namespace for testing
func DeleteNamespace(namespace string, client kubernetes.Interface) {
-	err := client.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{})
+	err := client.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
	if err != nil {
		logrus.Fatalf("Failed to delete namespace that was created for testing %v", err)
	} else {
@@ -92,6 +93,38 @@ func getEnvVarSources(name string) []v1.EnvFromSource {

func getVolumes(name string) []v1.Volume {
	return []v1.Volume{
+		{
+			Name: "projectedconfigmap",
+			VolumeSource: v1.VolumeSource{
+				Projected: &v1.ProjectedVolumeSource{
+					Sources: []v1.VolumeProjection{
+						{
+							ConfigMap: &v1.ConfigMapProjection{
+								LocalObjectReference: v1.LocalObjectReference{
+									Name: name,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			Name: "projectedsecret",
+			VolumeSource: v1.VolumeSource{
+				Projected: &v1.ProjectedVolumeSource{
+					Sources: []v1.VolumeProjection{
+						{
+							Secret: &v1.SecretProjection{
+								LocalObjectReference: v1.LocalObjectReference{
+									Name: name,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
		{
			Name: "configmap",
			VolumeSource: v1.VolumeSource{
@@ -123,6 +156,14 @@ func getVolumeMounts(name string) []v1.VolumeMount {
			MountPath: "etc/sec",
			Name:      "secret",
		},
+		{
+			MountPath: "etc/projectedconfig",
+			Name:      "projectedconfigmap",
+		},
+		{
+			MountPath: "etc/projectedsec",
+			Name:      "projectedsecret",
+		},
	}
}

@@ -389,6 +430,28 @@ func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *ap
	}
}

+func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
+	replicaset := int32(1)
+	deployment := &appsv1.Deployment{
+		ObjectMeta: getObjectMeta(namespace, deploymentName, false),
+		Spec: appsv1.DeploymentSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"secondLabel": "temp"},
+			},
+			Replicas: &replicaset,
+			Strategy: appsv1.DeploymentStrategy{
+				Type: appsv1.RollingUpdateDeploymentStrategyType,
+			},
+			Template: getPodTemplateSpecWithEnvVarSources(deploymentName),
+		},
+	}
+	if !both {
+		deployment.ObjectMeta.Annotations = nil
+	}
+	deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true)
+	return deployment
+}
+
// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
	return &appsv1.DaemonSet{
@@ -501,11 +564,11 @@ func GetSecretWithUpdatedLabel(namespace string, secretName string, label string
}

// GetResourceSHA returns the SHA value of given environment variable
-func GetResourceSHA(containers []v1.Container, envar string) string {
+func GetResourceSHA(containers []v1.Container, envVar string) string {
	for i := range containers {
		envs := containers[i].Env
		for j := range envs {
-			if envs[j].Name == envar {
+			if envs[j].Name == envVar {
				return envs[j].Value
			}
		}
@@ -535,7 +598,7 @@ func ConvertResourceToSHA(resourceType string, namespace string, resourceName st
func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) {
	logrus.Infof("Creating configmap")
	configmapClient := client.CoreV1().ConfigMaps(namespace)
-	_, err := configmapClient.Create(GetConfigmap(namespace, configmapName, data))
+	_, err := configmapClient.Create(context.TODO(), GetConfigmap(namespace, configmapName, data), metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return configmapClient, err
}
@@ -544,7 +607,7 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {
	logrus.Infof("Creating secret")
	secretClient := client.CoreV1().Secrets(namespace)
-	_, err := secretClient.Create(GetSecret(namespace, secretName, data))
+	_, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return secretClient, err
}
@@ -559,7 +622,7 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namesp
	} else {
		deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
	}
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -574,7 +637,7 @@ func CreateDeploymentConfig(client appsclient.Interface, deploymentName string,
	} else {
		deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName)
	}
-	deploymentConfig, err := deploymentConfigsClient.Create(deploymentConfigObj)
+	deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{})
	time.Sleep(5 * time.Second)
	return deploymentConfig, err
}
@@ -589,7 +652,7 @@ func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentNa
	} else {
		deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName)
	}
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -599,7 +662,30 @@ func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentNam
	logrus.Infof("Creating Deployment")
	deploymentClient := client.AppsV1().Deployments(namespace)
	deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
-	deployment, err := deploymentClient.Create(deploymentObj)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err

}

+// CreateDeploymentWithPodAnnotations creates a deployment in given namespace and returns the Deployment
+func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentName string, namespace string, both bool) (*appsv1.Deployment, error) {
+	logrus.Infof("Creating Deployment")
+	deploymentClient := client.AppsV1().Deployments(namespace)
+	deploymentObj := GetDeploymentWithPodAnnotations(namespace, deploymentName, both)
+	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
+	time.Sleep(3 * time.Second)
+	return deployment, err
+}
+
// CreateDeploymentWithEnvVarSourceAndAnnotations returns a deployment in given
// namespace with given annotations.
func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface, deploymentName string, namespace string, annotations map[string]string) (*appsv1.Deployment, error) {
	logrus.Infof("Creating Deployment")
	deploymentClient := client.AppsV1().Deployments(namespace)
	deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
	deploymentObj.Annotations = annotations
	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
@@ -614,7 +700,7 @@ func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespac
	} else {
		daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName)
	}
-	daemonset, err := daemonsetClient.Create(daemonsetObj)
+	daemonset, err := daemonsetClient.Create(context.TODO(), daemonsetObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return daemonset, err
}
@@ -629,7 +715,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
	} else {
		statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName)
	}
-	statefulset, err := statefulsetClient.Create(statefulsetObj)
+	statefulset, err := statefulsetClient.Create(context.TODO(), statefulsetObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return statefulset, err
}
@@ -637,7 +723,7 @@ func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, name
// DeleteDeployment deletes a deployment in given namespace and returns the error if any
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
	logrus.Infof("Deleting Deployment")
-	deploymentError := client.AppsV1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{})
+	deploymentError := client.AppsV1().Deployments(namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return deploymentError
}
@@ -645,7 +731,7 @@ func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentN
// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any
func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error {
	logrus.Infof("Deleting DeploymentConfig")
-	deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(deploymentConfigName, &metav1.DeleteOptions{})
+	deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return deploymentConfigError
}
@@ -653,7 +739,7 @@ func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deplo
// DeleteDaemonSet deletes a daemonset in given namespace and returns the error if any
func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
	logrus.Infof("Deleting DaemonSet %s", daemonsetName)
-	daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(daemonsetName, &metav1.DeleteOptions{})
+	daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), daemonsetName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return daemonsetError
}
@@ -661,7 +747,7 @@ func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetNam
// DeleteStatefulSet deletes a statefulset in given namespace and returns the error if any
func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {
	logrus.Infof("Deleting StatefulSet %s", statefulsetName)
-	statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})
+	statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), statefulsetName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return statefulsetError
}
@@ -675,7 +761,7 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin
	} else {
		configmap = GetConfigmap(namespace, configmapName, data)
	}
-	_, updateErr := configmapClient.Update(configmap)
+	_, updateErr := configmapClient.Update(context.TODO(), configmap, metav1.UpdateOptions{})
	time.Sleep(3 * time.Second)
	return updateErr
}
@@ -689,7 +775,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
	} else {
		secret = GetSecret(namespace, secretName, data)
	}
-	_, updateErr := secretClient.Update(secret)
+	_, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{})
	time.Sleep(3 * time.Second)
	return updateErr
}
@@ -697,7 +783,7 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
// DeleteConfigMap deletes a configmap in given namespace and returns the error if any
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
	logrus.Infof("Deleting configmap %q.\n", configmapName)
-	err := client.CoreV1().ConfigMaps(namespace).Delete(configmapName, &metav1.DeleteOptions{})
+	err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configmapName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return err
}
@@ -705,7 +791,7 @@ func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapNam
// DeleteSecret deletes a secret in given namespace and returns the error if any
func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
	logrus.Infof("Deleting secret %q.\n", secretName)
-	err := client.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
+	err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return err
}
@@ -727,6 +813,7 @@ func VerifyResourceUpdate(clients kube.Clients, config util.Config, envVarPostfi
	containers := upgradeFuncs.ContainersFunc(i)
	// match statefulsets with the correct annotation
	annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
+	searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
	reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
	reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
	matches := false
@@ -735,11 +822,16 @@
	} else if annotationValue != "" {
		values := strings.Split(annotationValue, ",")
		for _, value := range values {
			value = strings.Trim(value, " ")
			if value == config.ResourceName {
				matches = true
				break
			}
		}
	} else if searchAnnotationValue == "true" {
		if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
			matches = true
		}
	}

	if matches {
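Condensing the decision logic visible in this hunk into one standalone function may help; note that the signature and the reloaderEnabled short-circuit are reconstructions from the visible context, not the PR's exact code:

package main

import (
	"fmt"
	"strings"
)

// shouldReload restates the matching rules: the auto annotation wins,
// otherwise the reload annotation's comma-separated list is checked,
// otherwise the search/match pairing applies.
func shouldReload(reloaderEnabled bool, annotationValue, searchAnnotationValue,
	resourceName string, resourceAnnotations map[string]string) bool {
	if reloaderEnabled {
		return true
	}
	if annotationValue != "" {
		for _, v := range strings.Split(annotationValue, ",") {
			if strings.Trim(v, " ") == resourceName {
				return true
			}
		}
		return false
	}
	if searchAnnotationValue == "true" {
		// "reloader.stakater.com/match" is options.SearchMatchAnnotation.
		return resourceAnnotations["reloader.stakater.com/match"] == "true"
	}
	return false
}

func main() {
	fmt.Println(shouldReload(false, "foo, bar", "", "bar", nil)) // true
	fmt.Println(shouldReload(false, "", "true", "x",
		map[string]string{"reloader.stakater.com/match": "true"})) // true
}
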
@@ -8,31 +8,34 @@ import (

// Config contains rolling upgrade configuration parameters
type Config struct {
-	Namespace    string
-	ResourceName string
-	Annotation   string
-	SHAValue     string
-	Type         string
+	Namespace           string
+	ResourceName        string
+	ResourceAnnotations map[string]string
+	Annotation          string
+	SHAValue            string
+	Type                string
}

// GetConfigmapConfig provides utility config for configmap
func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
	return Config{
-		Namespace:    configmap.Namespace,
-		ResourceName: configmap.Name,
-		Annotation:   options.ConfigmapUpdateOnChangeAnnotation,
-		SHAValue:     GetSHAfromConfigmap(configmap.Data),
-		Type:         constants.ConfigmapEnvVarPostfix,
+		Namespace:           configmap.Namespace,
+		ResourceName:        configmap.Name,
+		ResourceAnnotations: configmap.Annotations,
+		Annotation:          options.ConfigmapUpdateOnChangeAnnotation,
+		SHAValue:            GetSHAfromConfigmap(configmap),
+		Type:                constants.ConfigmapEnvVarPostfix,
	}
}

// GetSecretConfig provides utility config for secret
func GetSecretConfig(secret *v1.Secret) Config {
	return Config{
-		Namespace:    secret.Namespace,
-		ResourceName: secret.Name,
-		Annotation:   options.SecretUpdateOnChangeAnnotation,
-		SHAValue:     GetSHAfromSecret(secret.Data),
-		Type:         constants.SecretEnvVarPostfix,
+		Namespace:           secret.Namespace,
+		ResourceName:        secret.Name,
+		ResourceAnnotations: secret.Annotations,
+		Annotation:          options.SecretUpdateOnChangeAnnotation,
+		SHAValue:            GetSHAfromSecret(secret.Data),
+		Type:                constants.SecretEnvVarPostfix,
	}
}

@@ -2,10 +2,12 @@ package util

import (
	"bytes"
+	"encoding/base64"
	"sort"
	"strings"

	"github.com/stakater/Reloader/internal/pkg/crypto"
+	v1 "k8s.io/api/core/v1"
)

// ConvertToEnvVarName converts the given text into a usable env var
@@ -29,11 +31,14 @@ func ConvertToEnvVarName(text string) string {
	return buffer.String()
}

-func GetSHAfromConfigmap(data map[string]string) string {
+func GetSHAfromConfigmap(configmap *v1.ConfigMap) string {
	values := []string{}
-	for k, v := range data {
+	for k, v := range configmap.Data {
		values = append(values, k+"="+v)
	}
+	for k, v := range configmap.BinaryData {
+		values = append(values, k+"="+base64.StdEncoding.EncodeToString(v))
+	}
	sort.Strings(values)
	return crypto.GenerateSHA(strings.Join(values, ";"))
}

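The change above makes the hash cover BinaryData as well as Data, with the "k=v" pairs sorted so the digest is independent of Go's randomized map iteration order. A self-contained restatement of the scheme, with sha256 standing in for the project's crypto.GenerateSHA helper:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"sort"
	"strings"
)

// shaFromConfigmapData mirrors the scheme above: "k=v" pairs (binary
// values base64-encoded), sorted, joined with ";", then hashed.
func shaFromConfigmapData(data map[string]string, binaryData map[string][]byte) string {
	values := []string{}
	for k, v := range data {
		values = append(values, k+"="+v)
	}
	for k, v := range binaryData {
		values = append(values, k+"="+base64.StdEncoding.EncodeToString(v))
	}
	sort.Strings(values)
	sum := sha256.Sum256([]byte(strings.Join(values, ";")))
	return fmt.Sprintf("%x", sum)
}

func main() {
	fmt.Println(shaFromConfigmapData(
		map[string]string{"test": "test"},
		map[string][]byte{"bintest": []byte("test")},
	))
}
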
@@ -2,6 +2,8 @@ package util

import (
	"testing"
+
+	v1 "k8s.io/api/core/v1"
)
func TestConvertToEnvVarName(t *testing.T) {
@@ -11,3 +13,35 @@ func TestConvertToEnvVarName(t *testing.T) {
		t.Errorf("Failed to convert data into environment variable")
	}
}

func TestGetHashFromConfigMap(t *testing.T) {
	data := map[*v1.ConfigMap]string{
		{
			Data: map[string]string{"test": "test"},
		}: "Only Data",
		{
			Data:       map[string]string{"test": "test"},
			BinaryData: map[string][]byte{"bintest": []byte("test")},
		}: "Both Data and BinaryData",
		{
			BinaryData: map[string][]byte{"bintest": []byte("test")},
		}: "Only BinaryData",
	}
	converted := map[string]string{}
	for cm, cmName := range data {
		converted[cmName] = GetSHAfromConfigmap(cm)
	}

	// Test that the hash for each configmap is really unique
	for cmName, cmHash := range converted {
		count := 0
		for _, cmHash2 := range converted {
			if cmHash == cmHash2 {
				count++
			}
		}
		if count > 1 {
			t.Errorf("Found duplicate hashes for %v", cmName)
		}
	}
}

okteto.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
name: reloader-reloader
image: okteto/golang:1
command: bash
securityContext:
  capabilities:
    add:
      - SYS_PTRACE
volumes:
  - /go/pkg/
  - /root/.cache/go-build/
sync:
  - .:/app
forward:
  - 2345:2345
@@ -1,10 +1,12 @@
package kube

import (
+	"context"
	"os"

	"k8s.io/client-go/tools/clientcmd"

+	argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
	appsclient "github.com/openshift/client-go/apps/clientset/versioned"
	"github.com/sirupsen/logrus"
	"k8s.io/client-go/kubernetes"
@@ -15,6 +17,7 @@ import (
type Clients struct {
	KubernetesClient    kubernetes.Interface
	OpenshiftAppsClient appsclient.Interface
+	ArgoRolloutClient   argorollout.Interface
}

var (
@@ -38,18 +41,34 @@ func GetClients() Clients {
		}
	}

+	var rolloutClient *argorollout.Clientset
+
+	rolloutClient, err = GetArgoRolloutClient()
+	if err != nil {
+		logrus.Warnf("Unable to create ArgoRollout client error = %v", err)
+	}
+
	return Clients{
		KubernetesClient:    client,
		OpenshiftAppsClient: appsClient,
+		ArgoRolloutClient:   rolloutClient,
	}
}

+func GetArgoRolloutClient() (*argorollout.Clientset, error) {
+	config, err := getConfig()
+	if err != nil {
+		return nil, err
+	}
+	return argorollout.NewForConfig(config)
+}
+
func isOpenshift() bool {
	client, err := GetKubernetesClient()
	if err != nil {
		logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
	}
-	_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do().Raw()
+	_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do(context.TODO()).Raw()
	if err == nil {
		logrus.Info("Environment: Openshift")
		return true
@@ -78,7 +97,6 @@ func GetKubernetesClient() (*kubernetes.Clientset, error) {

func getConfig() (*rest.Config, error) {
	var config *rest.Config
-	var err error
	kubeconfigPath := os.Getenv("KUBECONFIG")
	if kubeconfigPath == "" {
		kubeconfigPath = os.Getenv("HOME") + "/.kube/config"
@@ -95,9 +113,6 @@ func getConfig() (*rest.Config, error) {
			return nil, err
		}
	}
-	if err != nil {
-		return nil, err
-	}

	return config, nil
}