Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)
Compare commits: 522 commits
.dockerignore (new file, 1 line)
@@ -0,0 +1 @@
vendor
.github/workflows/pull_request.yaml (new file, 141 lines, vendored)
@@ -0,0 +1,141 @@
name: Pull Request

on:
  pull_request_target:
    branches:
      - master

env:
  DOCKER_FILE_PATH: Dockerfile
  GOLANG_VERSION: 1.18.2
  KUBERNETES_VERSION: "1.18.0"
  KIND_VERSION: "0.10.0"

jobs:
  build:
    runs-on: ubuntu-latest
    name: Build
    if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with:
          ref: ${{github.event.pull_request.head.sha}}

      # Setting up helm binary
      - name: Set up Helm
        uses: azure/setup-helm@v1

      - name: Set up Go
        id: go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GOLANG_VERSION }}

      - name: Install Dependencies
        run: |
          make install

      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v2.3.0
        with:
          version: v1.45.2
          only-new-issues: false
          args: --timeout 10m

      - name: Helm Lint
        run: |
          cd deployments/kubernetes/chart/reloader
          helm lint

      - name: Link check
        uses: gaurav-nelson/github-action-markdown-link-check@v1

      - name: Install kubectl
        run: |
          curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
          sudo install ./kubectl /usr/local/bin/ && rm kubectl
          kubectl version --short --client
          kubectl version --short --client | grep -q ${KUBERNETES_VERSION}

      - name: Install Kind
        run: |
          curl -L -o kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64
          sudo install ./kind /usr/local/bin && rm kind
          kind version
          kind version | grep -q ${KIND_VERSION}

      - name: Create Kind Cluster
        run: |
          kind create cluster
          kubectl cluster-info

      - name: Test
        run: make test

      - name: Generate Tag
        id: generate_tag
        run: |
          sha=${{ github.event.pull_request.head.sha }}
          tag="SNAPSHOT-PR-${{ github.event.pull_request.number }}-${sha:0:8}"
          echo "##[set-output name=GIT_TAG;]$(echo ${tag})"

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to Registry
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
          password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}

      - name: Generate image repository path
        run: |
          echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV

      - name: Build and Push Docker Image
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ${{ env.DOCKER_FILE_PATH }}
          pull: true
          push: true
          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
          cache-to: type=inline
          platforms: linux/amd64,linux/arm,linux/arm64
          tags: |
            ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
          labels: |
            org.opencontainers.image.source=${{ github.event.repository.clone_url }}
            org.opencontainers.image.created=${{ steps.prep.outputs.created }}
            org.opencontainers.image.revision=${{ github.sha }}

      - name: Comment on PR
        uses: mshick/add-pr-comment@v1
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
        with:
          message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
          allow-repeats: false

      - name: Notify Failure
        if: failure()
        uses: mshick/add-pr-comment@v1
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
        with:
          message: '@${{ github.actor }} Yikes! You better fix it before anyone else finds out! [Build](https://github.com/${{ github.repository }}/commit/${{ github.event.pull_request.head.sha }}/checks) has Failed!'
          allow-repeats: false

      - name: Notify Slack
        uses: 8398a7/action-slack@v3
        if: always() # Pick up events even if the job fails or is canceled.
        with:
          status: ${{ job.status }}
          fields: repo,author,action,eventName,ref,workflow
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
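For reference, the `Generate Tag` step above derives the snapshot tag from the pull request number and the first eight characters of the head commit SHA. A quick sketch with made-up values (the PR number and SHA below are hypothetical, not taken from the repository):

```bash
# Hypothetical values for illustration only
sha=0123456789abcdef0123456789abcdef01234567
tag="SNAPSHOT-PR-123-${sha:0:8}"   # -> SNAPSHOT-PR-123-01234567

# The PR comment step would then suggest something like:
docker pull stakater/reloader:SNAPSHOT-PR-123-01234567
```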
.github/workflows/push.yaml (new file, 182 lines, vendored)
@@ -0,0 +1,182 @@
name: Push

on:
  push:
    branches:
      - master

env:
  DOCKER_FILE_PATH: Dockerfile
  GOLANG_VERSION: 1.18.2
  KUBERNETES_VERSION: "1.18.0"
  KIND_VERSION: "0.10.0"
  HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"

jobs:
  build:
    name: Build
    if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
    runs-on: ubuntu-latest

    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with:
          persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token
          fetch-depth: 0 # otherwise, you will fail to push refs to dest repo

      # Setting up helm binary
      - name: Set up Helm
        uses: azure/setup-helm@v1

      - name: Set up Go
        id: go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GOLANG_VERSION }}

      - name: Install Dependencies
        run: |
          make install

      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v2.3.0
        with:
          version: v1.45.2
          only-new-issues: false
          args: --timeout 10m

      - name: Install kubectl
        run: |
          curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
          sudo install ./kubectl /usr/local/bin/ && rm kubectl
          kubectl version --short --client
          kubectl version --short --client | grep -q ${KUBERNETES_VERSION}

      - name: Install Kind
        run: |
          curl -L -o kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64
          sudo install ./kind /usr/local/bin && rm kind
          kind version
          kind version | grep -q ${KIND_VERSION}

      - name: Create Kind Cluster
        run: |
          kind create cluster
          kubectl cluster-info

      - name: Test
        run: make test

      - name: Generate Tag
        id: generate_tag
        uses: anothrNick/github-tag-action@1.36.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: true
          DEFAULT_BUMP: patch
          DRY_RUN: true

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to Registry
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
          password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}

      - name: Generate image repository path
        run: |
          echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV

      - name: Build and push
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ${{ env.DOCKER_FILE_PATH }}
          pull: true
          push: true
          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
          cache-to: type=inline
          platforms: linux/amd64,linux/arm,linux/arm64
          tags: |
            ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.new_tag }}
          labels: |
            org.opencontainers.image.source=${{ github.event.repository.clone_url }}
            org.opencontainers.image.created=${{ steps.prep.outputs.created }}
            org.opencontainers.image.revision=${{ github.sha }}

      ##############################
      ## Add steps to generate required artifacts for a release here(helm chart, operator manifest etc.)
      ##############################

      # Generate tag for operator without "v"
      - name: Generate Operator Tag
        id: generate_operator_tag
        uses: anothrNick/github-tag-action@1.36.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: false
          DEFAULT_BUMP: patch
          DRY_RUN: true

      # Update chart tag to the latest semver tag
      - name: Update Chart Version
        env:
          VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
        run: make bump-chart

      - name: Helm Template
        run: |
          helm template reloader deployments/kubernetes/chart/reloader/ > deployments/kubernetes/reloader.yaml
          helm template reloader deployments/kubernetes/chart/reloader/ --output-dir deployments/kubernetes/manifests/ && mv deployments/kubernetes/manifests/reloader/templates/* deployments/kubernetes/manifests/ && rm -r deployments/kubernetes/manifests/reloader

      # Publish helm chart
      - name: Publish Helm chart
        uses: stefanprodan/helm-gh-pages@master
        with:
          branch: master
          repository: stakater-charts
          target_dir: docs
          token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          charts_dir: deployments/kubernetes/chart/
          charts_url: ${{ env.HELM_REGISTRY_URL }}
          owner: stakater
          linting: on
          commit_username: stakater-user
          commit_email: stakater@gmail.com

      # Commit back changes
      - name: Commit files
        run: |
          git config --local user.email "stakater@gmail.com"
          git config --local user.name "stakater-user"
          git status
          git add .
          git commit -m "[skip-ci] Update artifacts" -a

      - name: Push changes
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          branch: ${{ github.ref }}

      - name: Push Latest Tag
        uses: anothrNick/github-tag-action@1.36.0
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          WITH_V: true
          DEFAULT_BUMP: patch

      - name: Notify Slack
        uses: 8398a7/action-slack@v3
        if: always() # Pick up events even if the job fails or is canceled.
        with:
          status: ${{ job.status }}
          fields: repo,author,action,eventName,ref,workflow
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
.github/workflows/release.yaml (new file, 44 lines, vendored)
@@ -0,0 +1,44 @@
name: Release Go project

on:
  push:
    tags:
      - "v*"

env:
  GOLANG_VERSION: 1.18.2

jobs:
  build:
    name: GoReleaser build
    runs-on: ubuntu-latest

    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0 # See: https://goreleaser.com/ci/actions/

      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GOLANG_VERSION }}
        id: go

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@master
        with:
          version: latest
          args: release --rm-dist
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}

      - name: Notify Slack
        uses: 8398a7/action-slack@v3
        if: always()
        with:
          status: ${{ job.status }}
          fields: repo,author,action,eventName,ref,workflow
        env:
          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
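This workflow only fires on tags matching `v*`, so the GoReleaser job runs once such a tag lands on the repository, whether created by the push workflow above or pushed manually, for example (version number is an example):

```bash
git tag v1.0.0
git push origin v1.0.0
```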
.gitignore (vendored)
@@ -7,4 +7,6 @@ out/
_gopath/
.DS_Store
.vscode
vendor
vendor
dist
Reloader
.goreleaser.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
builds:
  - env:
      - CGO_ENABLED=0
    goos:
      - windows
      - darwin
      - linux
    goarch:
      - 386
      - amd64
      - arm
      - arm64
archives:
  - name_template: "{{ .ProjectName }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
snapshot:
  name_template: "{{ .Tag }}-next"
checksum:
  name_template: "{{ .ProjectName }}_{{ .Version }}_checksums.txt"
changelog:
  sort: asc
  filters:
    exclude:
      - '^docs:'
      - '^test:'
env_files:
  github_token: /home/jenkins/.apitoken/hub
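A snapshot build is usually enough to sanity-check this configuration locally before tagging a release. The invocation below is an assumption based on the GoReleaser flags already used by the release workflow, not something taken from this repository:

```bash
# Build snapshot artifacts into ./dist without publishing anything
goreleaser release --snapshot --skip-publish --rm-dist
```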
Dockerfile (new file, 45 lines)
@@ -0,0 +1,45 @@
ARG BUILDER_IMAGE
ARG BASE_IMAGE

# Build the manager binary
FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.18.2} as builder

ARG TARGETOS
ARG TARGETARCH
ARG GOPROXY
ARG GOPRIVATE

WORKDIR /workspace

# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download

# Copy the go source
COPY main.go main.go
COPY internal/ internal/
COPY pkg/ pkg/

# Build
RUN CGO_ENABLED=0 \
    GOOS=${TARGETOS} \
    GOARCH=${TARGETARCH} \
    GOPROXY=${GOPROXY} \
    GOPRIVATE=${GOPRIVATE} \
    GO111MODULE=on \
    go build -mod=mod -a -o manager main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM ${BASE_IMAGE:-gcr.io/distroless/static:nonroot}
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532

# Port for metrics and probes
EXPOSE 9090

ENTRYPOINT ["/manager"]
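Because the builder and base images are parameterised, the image can also be built directly with Docker Buildx. The command below is an illustrative sketch (the image tag is a placeholder; the build args simply repeat the Dockerfile's own defaults), and the Makefile's build-image target further down drives the same arguments:

```bash
# Single-platform local build; --load imports the result into the local Docker daemon
docker buildx build \
  --platform linux/amd64 \
  --build-arg BUILDER_IMAGE=golang:1.18.2 \
  --build-arg BASE_IMAGE=gcr.io/distroless/static:nonroot \
  -t stakater/reloader:dev \
  --load .
```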
Jenkinsfile (vendored, deleted, 10 lines)
@@ -1,10 +0,0 @@
#!/usr/bin/groovy
@Library('github.com/stakater/fabric8-pipeline-library@v2.10.4')

def dummy

goBuildAndRelease {
    chartRepositoryURL = 'https://chartmuseum.release.stakater.com'
    publicChartRepositoryURL = 'https://stakater.github.io/stakater-charts'
    publicChartGitURL = 'git@github.com:stakater/stakater-charts.git'
}
Makefile
@@ -1,35 +1,73 @@
# note: call scripts from /scripts

.PHONY: default build builder-image binary-image test stop clean-images clean push apply deploy
.PHONY: default build build-image test stop push apply deploy release release-all manifest push

BUILDER ?= reloader-builder
OS ?= linux
ARCH ?= ???
ALL_ARCH ?= arm64 arm amd64

BUILDER_IMAGE ?=
BASE_IMAGE ?=
BINARY ?= Reloader
DOCKER_IMAGE ?= stakater/reloader
# Default value "dev"
DOCKER_TAG ?= 1.0.0
REPOSITORY = ${DOCKER_IMAGE}:${DOCKER_TAG}

VERSION=$(shell cat .version)
# Default value "dev"
VERSION ?= 0.0.1

REPOSITORY_GENERIC = ${DOCKER_IMAGE}:${VERSION}
REPOSITORY_ARCH = ${DOCKER_IMAGE}:v${VERSION}-${ARCH}
BUILD=

GOCMD = go
GLIDECMD = glide
GOFLAGS ?= $(GOFLAGS:)
LDFLAGS =
GOPROXY ?=
GOPRIVATE ?=

default: build test

install:
	"$(GLIDECMD)" install
	"$(GOCMD)" mod download

run:
	go run ./main.go

build:
	"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"

builder-image:
	@docker build --network host -t "${BUILDER}" -f build/package/Dockerfile.build .
build-image:
	docker buildx build \
		--platform ${OS}/${ARCH} \
		--build-arg GOARCH=$(ARCH) \
		--build-arg BUILDER_IMAGE=$(BUILDER_IMAGE) \
		--build-arg BASE_IMAGE=${BASE_IMAGE} \
		--build-arg GOPROXY=${GOPROXY} \
		--build-arg GOPRIVATE=${GOPRIVATE} \
		-t "${REPOSITORY_ARCH}" \
		--load \
		-f Dockerfile \
		.

binary-image: builder-image
	@docker run --network host --rm "${BUILDER}" | docker build --network host -t "${REPOSITORY}" -f Dockerfile.run -
push:
	docker push ${REPOSITORY_ARCH}

release: build-image push manifest

release-all:
	-rm -rf ~/.docker/manifests/*
	# Make arch-specific release
	@for arch in $(ALL_ARCH) ; do \
		echo Make release: $$arch ; \
		make release ARCH=$$arch ; \
	done

	set -e
	docker manifest push --purge $(REPOSITORY_GENERIC)

manifest:
	set -e
	docker manifest create -a $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH)
	docker manifest annotate --arch $(ARCH) $(REPOSITORY_GENERIC) $(REPOSITORY_ARCH)

test:
	"$(GOCMD)" test -timeout 1800s -v ./...
@@ -37,16 +75,14 @@ test:
stop:
	@docker stop "${BINARY}"

clean-images: stop
	@docker rmi "${BUILDER}" "${BINARY}"

clean:
	"$(GOCMD)" clean -i

push: ## push the latest Docker image to DockerHub
	docker push $(REPOSITORY)

apply:
	kubectl apply -f deployments/manifests/ -n temp-reloader

deploy: binary-image push apply

# Bump Chart
bump-chart:
	sed -i "s/^version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
	sed -i "s/^appVersion:.*/appVersion: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
	sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
	sed -i "s/version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
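Putting the targets above together, a typical invocation might look roughly like this. The image name and version are placeholder values, and the flow is a sketch derived from the targets shown above rather than documented usage:

```bash
# Build, push and assemble the multi-arch manifest for every architecture in ALL_ARCH
make release-all VERSION=0.0.1 DOCKER_IMAGE=stakater/reloader

# Or build and load a single-arch image locally
make build-image ARCH=amd64 VERSION=0.0.1
```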
README.md
@@ -6,23 +6,79 @@
[](https://github.com/stakater/reloader/releases/latest)
[](https://hub.docker.com/r/stakater/reloader/)
[](https://hub.docker.com/r/stakater/reloader/)
[](https://microbadger.com/images/stakater/reloader)
[](https://microbadger.com/images/stakater/reloader)
[](LICENSE)
[](http://stakater.com/?utm_source=Reloader&utm_medium=github)

## Problem

We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on relevant `Deployment`, `Deamonset` and `Statefulset`
We would like to watch if some change happens in `ConfigMap` and/or `Secret`; then perform a rolling upgrade on relevant `DeploymentConfig`, `Deployment`, `Daemonset`, `Statefulset` and `Rollout`

## Solution

Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `Deployments`, `Deamonsets` and `Statefulsets`.
Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts`.

## Compatibility

Reloader is compatible with kubernetes >= 1.9

## How to use Reloader

For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap` or `Secret` called `foo-secret` or both. Then add your annotation (by default `reloader.stakater.com/auto`) to main metadata of your `Deployment`

```yaml
kind: Deployment
metadata:
  annotations:
    reloader.stakater.com/auto: "true"
spec:
  template:
    metadata:
```

This will discover deploymentconfigs/deployments/daemonsets/statefulset/rollouts automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform rolling upgrade on related pods when `foo-configmap` or `foo-secret`are updated.

You can restrict this discovery to only `ConfigMap` or `Secret` objects that
are tagged with a special annotation. To take advantage of that, annotate
your deploymentconfigs/deployments/daemonsets/statefulset/rollouts like this:

```yaml
kind: Deployment
metadata:
  annotations:
    reloader.stakater.com/search: "true"
spec:
  template:
```

and Reloader will trigger the rolling upgrade upon modification of any
`ConfigMap` or `Secret` annotated like this:

```yaml
kind: ConfigMap
metadata:
  annotations:
    reloader.stakater.com/match: "true"
data:
  key: value
```

provided the secret/configmap is being used in an environment variable, or a
volume mount.

Please note that `reloader.stakater.com/search` and
`reloader.stakater.com/auto` do not work together. If you have the
`reloader.stakater.com/auto: "true"` annotation on your deployment, then it
will always restart upon a change in configmaps or secrets it uses, regardless
of whether they have the `reloader.stakater.com/match: "true"` annotation or
not.

We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a deploymentconfig, deployment, daemonset, statefulset or rollout.
To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations mentioned [here](#Configmap) or [here](#Secret)

### Configmap

To perform rolling upgrade when change happens only on specific configmaps use below annotation.

For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap`. Then add this annotation to main metadata of your `Deployment`

```yaml
@@ -43,13 +99,15 @@ metadata:
  annotations:
    configmap.reloader.stakater.com/reload: "foo-configmap,bar-configmap,baz-configmap"
spec:
  template:
  template:
    metadata:
```

### Secret

For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
To perform rolling upgrade when change happens only on specific secrets use below annotation.

For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`

```yaml
kind: Deployment
@@ -57,7 +115,7 @@ metadata:
  annotations:
    secret.reloader.stakater.com/reload: "foo-secret"
spec:
  template:
  template:
    metadata:
```

@@ -69,56 +127,148 @@ metadata:
  annotations:
    secret.reloader.stakater.com/reload: "foo-secret,bar-secret,baz-secret"
spec:
  template:
  template:
    metadata:
```

### NOTES

- Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with reloader.
- For [rollouts](https://github.com/argoproj/argo-rollouts/) reloader simply triggers a change is up to you how you configure the rollout strategy.
- `reloader.stakater.com/auto: "true"` will only reload the pod, if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployment/Daemonsets/Statefulsets`
- `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in specified configmap or secret, irrespective of the usage of configmap or secret.
- you may override the auto annotation with the `--auto-annotation` flag
- you may override the search annotation with the `--auto-search-annotation` flag
  and the match annotation with the `--search-match-annotation` flag
- you may override the configmap annotation with the `--configmap-annotation` flag
- you may override the secret annotation with the `--secret-annotation` flag
- you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
- you may want to prevent watching certain resources with the `--resources-to-ignore` flag
- you can configure logging in JSON format with the `--log-format=json` option
- you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)

## Reload Strategies

Reloader supports multiple "reload" strategies for performing rolling upgrades to resources. The following list describes them:

- **env-vars**: When a tracked `configMap`/`secret` is updated, this strategy attaches a Reloader specific environment variable to any containers
  referencing the changed `configMap` or `secret` on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.).
  This strategy can be specified with the `--reload-strategy=env-vars` argument. Note: This is the default reload strategy.
- **annotations**: When a tracked `configMap`/`secret` is updated, this strategy attaches a `reloader.stakater.com/last-reloaded-from` pod template annotation
  on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy is useful when using resource syncing tools like ArgoCD, since it will not cause these tools
  to detect configuration drift after a resource is reloaded. Note: Since the attached pod template annotation only tracks the last reload source, this strategy will reload any tracked resource should its
  `configMap` or `secret` be deleted and recreated.
  This strategy can be specified with the `--reload-strategy=annotations` argument.

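To make the difference concrete, here is a rough sketch of what each strategy leaves behind on the owning resource's pod template after a reload. The annotation name comes from the list above, while the environment-variable name and value are purely illustrative (the exact naming Reloader uses is not described in this section):

```yaml
# --reload-strategy=env-vars (default): an extra env var on containers that
# reference the changed ConfigMap/Secret
spec:
  template:
    spec:
      containers:
        - name: app
          env:
            - name: STAKATER_FOO_CONFIGMAP   # hypothetical variable name
              value: "5f3c9a..."             # illustrative value derived from the new data
---
# --reload-strategy=annotations: a pod template annotation instead of an env var
spec:
  template:
    metadata:
      annotations:
        reloader.stakater.com/last-reloaded-from: "..."  # records the ConfigMap/Secret that triggered the reload
```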
## Deploying to Kubernetes

You can deploy Reloader by following methods:

### Vanilla Manifests

You can apply vanilla manifests by running the following command
You can apply vanilla manifests by changing `RELEASE-NAME` placeholder provided in manifest with a proper value and apply it by running the command given below:

```bash
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
```

By default Reloader gets deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.
By default, Reloader gets deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.

Reloader can be configured to ignore the resources `secrets` and `configmaps` by passing the following args (`spec.template.spec.containers.args`) to its container :

| Args                             | Description          |
| -------------------------------- | -------------------- |
| --resources-to-ignore=configMaps | To ignore configMaps |
| --resources-to-ignore=secrets    | To ignore secrets    |

`Note`: At one time only one of these resource can be ignored, trying to do it will cause error in Reloader. Workaround for ignoring both resources is by scaling down the reloader pods to `0`.

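For example, to have the deployed Reloader ignore secrets entirely, the flag from the table above goes under `spec.template.spec.containers.args` in the Deployment manifest. A minimal excerpt (container name and image tag are illustrative):

```yaml
spec:
  template:
    spec:
      containers:
        - name: reloader
          image: stakater/reloader:latest
          args:
            - "--resources-to-ignore=secrets"
```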
### Vanilla kustomize

You can also apply the vanilla manifests by running the following command

```bash
kubectl apply -k https://github.com/stakater/Reloader/deployments/kubernetes
```

Similarly to vanilla manifests get deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.

### Kustomize

You can write your own `kustomization.yaml` using ours as a 'base' and write patches to tweak the configuration.

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

bases:
  - https://github.com/stakater/Reloader/deployments/kubernetes

namespace: reloader
```

### Helm Charts

Alternatively if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using below mentioned commands
Alternatively if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using below mentioned commands. Follow [this](docs/Helm2-to-Helm3.md) guide, in case you have trouble migrating reloader from Helm2 to Helm3

```bash
helm repo add stakater https://stakater.github.io/stakater-charts

helm repo update

helm install stakater/reloader
helm install stakater/reloader # For helm3 add --generate-name flag or set the release name
```

**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Deamonsets` and `Statefulsets` in `test` namespace.
**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts` in `test` namespace.

```bash
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test # For helm3 add --generate-name flag or set the release name
```

Reloader can be configured to ignore the resources `secrets` and `configmaps` by using the following parameters of `values.yaml` file:

| Parameter        | Description                                                     | Type    |
| ---------------- | --------------------------------------------------------------- | ------- |
| ignoreSecrets    | To ignore secrets. Valid value are either `true` or `false`     | boolean |
| ignoreConfigMaps | To ignore configMaps. Valid value are either `true` or `false`  | boolean |

`Note`: At one time only one of these resource can be ignored, trying to do it will cause error in helm template compilation.

You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart

You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in values.yaml file. Service monitor will be removed in future releases of reloader in favour of Pod monitor.

**Note:** Reloading of OpenShift (DeploymentConfig) and/or Argo Rollouts has to be enabled explicitly because it might not be always possible to use it on a cluster with restricted permissions. This can be done by changing the following parameters:

| Parameter        | Description                                                                    | Type    |
| ---------------- |------------------------------------------------------------------------------| ------- |
| isOpenshift      | Enable OpenShift DeploymentConfigs. Valid value are either `true` or `false`  | boolean |
| isArgoRollouts   | Enable Argo Rollouts. Valid value are either `true` or `false`                | boolean |
| reloadOnCreate   | Enable reload on create events. Valid value are either `true` or `false`      | boolean |

**ReloadOnCreate** reloadOnCreate controls how Reloader handles secrets being added to the cache for the first time. If reloadOnCreate is set to true:

* Configmaps/secrets being added to the cache will cause Reloader to perform a rolling update of the associated workload.
* When applications are deployed for the first time, Reloader will perform a rolling update of the associated workload.
* If you are running Reloader in HA mode all workloads will have a rolling update performed when a new leader is elected.

If ReloadOnCreate is set to false:

* Updates to configMaps/Secrets that occur while there is no leader will not be picked up by the new leader until a subsequent update of the configmap/secret occurs. In the worst case the window in which there can be no leader is 15s as this is the LeaseDuration.

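As an illustration, enabling Argo Rollouts support together with reloadOnCreate from the tables above could look like this with Helm 3. The release name is an example, and the value keys assume the `reloader.` prefix that the chart's templates use elsewhere:

```bash
helm install reloader stakater/reloader \
  --namespace reloader --create-namespace \
  --set reloader.isArgoRollouts=true \
  --set reloader.reloadOnCreate=true
```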
## Help

### Documentation

You can find more documentation [here](docs/)
You can find more documentation [here](docs)

### Have a question?

File a GitHub [issue](https://github.com/stakater/Reloader/issues), or send us an [email](mailto:stakater@gmail.com).

### Talk to us on Slack

Join and talk to us on Slack for discussing Reloader

[](https://stakater-slack.herokuapp.com/)
[](https://stakater.slack.com/messages/CC5S05S12)
[](https://slack.stakater.com/)
[](https://stakater-community.slack.com/messages/CC5S05S12)

## Contributing

@@ -128,13 +278,18 @@ Please use the [issue tracker](https://github.com/stakater/Reloader/issues) to r

### Developing

1. Deploy Reloader.
2. Run `okteto up` to activate your development container.
3. `make build`.
4. `./Reloader`

PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.

1. **Fork** the repo on GitHub
2. **Clone** the project to your own machine
3. **Commit** changes to your own branch
4. **Push** your work back up to your fork
5. Submit a **Pull request** so that we can review your changes

NOTE: Be sure to merge the latest from "upstream" before making a pull request!

@@ -153,8 +308,8 @@ Apache2 © [Stakater](http://stakater.com)

See [our other projects][community]
or contact us in case of professional services and queries on <hello@stakater.com>

[website]: http://stakater.com/
[community]: https://github.com/stakater/

## Acknowledgements

Deleted file:
@@ -1,21 +0,0 @@
FROM stakater/go-glide:1.9.3
MAINTAINER "Stakater Team"

RUN apk update

RUN apk -v --update \
    add git build-base && \
    rm -rf /var/cache/apk/* && \
    mkdir -p "$GOPATH/src/github.com/stakater/Reloader"

ADD . "$GOPATH/src/github.com/stakater/Reloader"

RUN cd "$GOPATH/src/github.com/stakater/Reloader" && \
    glide update && \
    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a --installsuffix cgo --ldflags="-s" -o /Reloader

COPY build/package/Dockerfile.run /

# Running this image produces a tarball suitable to be piped into another
# Docker build command.
CMD tar -cf - -C / Dockerfile.run Reloader
Deleted file:
@@ -1,8 +0,0 @@
FROM alpine:3.4
MAINTAINER "Stakater Team"

RUN apk add --update ca-certificates

COPY Reloader /bin/Reloader

ENTRYPOINT ["/bin/Reloader"]
Binary file not shown.
@@ -3,27 +3,29 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: 0.0.20
appVersion: 0.0.20
version: v0.0.123
appVersion: v0.0.123
keywords:
  - Reloader
  - kubernetes
home: https://github.com/stakater/Reloader
sources:
  - https://github.com/stakater/IngressMonitorController
  - https://github.com/stakater/Reloader
icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png
maintainers:
- name: Stakater
  email: hello@stakater.com
- name: rasheedamir
  email: rasheed@aurorasolutions.io
- name: waseem-h
  email: waseemhassan@stakater.com
- name: faizanahmad055
  email: faizan.ahmad55@outlook.com
- name: kahootali
  email: ali.kahoot@aurorasolutions.io
- name: ahmadiq
  email: ahmad@aurorasolutions.io
- name: ahsan-storm
  email: ahsanmuhammad1@outlook.com
- name: Stakater
  email: hello@stakater.com
- name: rasheedamir
  email: rasheed@aurorasolutions.io
- name: waseem-h
  email: waseemhassan@stakater.com
- name: faizanahmad055
  email: faizan.ahmad55@outlook.com
- name: kahootali
  email: ali.kahoot@aurorasolutions.io
- name: ahmadiq
  email: ahmad@aurorasolutions.io
- name: ahsan-storm
  email: ahsanmuhammad1@outlook.com
- name: ahmedwaleedmalik
  email: waleed@stakater.com
@@ -5,6 +5,7 @@ approvers:
- waseem-h
- rasheedamir
- ahsan-storm
- ahmedwaleedmalik
reviewers:
- faizanahmad055
- kahootali
@@ -12,3 +13,4 @@ reviewers:
- waseem-h
- rasheedamir
- ahsan-storm
- ahmedwaleedmalik
@@ -1,7 +1,7 @@
- For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap`. Then add this annotation to main metadata of your `Deployment`
  configmap.reloader.stakater.com/reload: "foo-configmap"
  {{ .Values.reloader.custom_annotations.configmap | default "configmap.reloader.stakater.com/reload" }}: "foo-configmap"

- For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
  secret.reloader.stakater.com/reload: "foo-secret"
- For a `Deployment` called `foo` have a `Secret` called `foo-secret`. Then add this annotation to main metadata of your `Deployment`
  {{ .Values.reloader.custom_annotations.secret | default "secret.reloader.stakater.com/reload" }}: "foo-secret"

- After successful installation, your pods will get rolling updates when a change in data of configmap or secret will happen.
@@ -2,6 +2,7 @@
{{/*
Expand the name of the chart.
*/}}

{{- define "reloader-name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" | lower -}}
{{- end -}}
@@ -11,24 +12,54 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "reloader-fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}

{{- define "reloader-labels.chart" -}}
app: {{ template "reloader-name" . }}
app: {{ template "reloader-fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
{{- end -}}

{{/*
Create pod anti affinity labels
*/}}
{{- define "reloader-podAntiAffinity" -}}
podAntiAffinity:
  preferredDuringSchedulingIgnoredDuringExecution:
  - weight: 100
    podAffinityTerm:
      labelSelector:
        matchExpressions:
        - key: app
          operator: In
          values:
          - {{ template "reloader-fullname" . }}
      topologyKey: "kubernetes.io/hostname"
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "serviceAccountName" -}}
{{- define "reloader-serviceAccountName" -}}
{{- if .Values.reloader.serviceAccount.create -}}
{{ default (include "reloader-fullname" .) .Values.reloader.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.reloader.serviceAccount.name }}
{{- end -}}
{{- end -}}

{{/*
Create the annotations to support helm3
*/}}
{{- define "reloader-helm3.annotations" -}}
meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
meta.helm.sh/release-name: {{ .Release.Name | quote }}
{{- end -}}
@@ -1,8 +1,13 @@
---
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
apiVersion: rbac.authorization.k8s.io/v1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{- end }}
kind: ClusterRole
metadata:
  annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
  labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
@@ -11,20 +16,47 @@ metadata:
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
  name: {{ template "reloader-name" . }}-role
  name: {{ template "reloader-fullname" . }}-role
  namespace: {{ .Release.Namespace }}
rules:
  - apiGroups:
      - ""
    resources:
    resources:
{{- if .Values.reloader.ignoreSecrets }}{{- else }}
      - secrets
{{- end }}
{{- if .Values.reloader.ignoreConfigMaps }}{{- else }}
      - configmaps
{{- end }}
    verbs:
      - list
      - get
      - watch
{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
  - apiGroups:
      - "apps.openshift.io"
      - ""
    resources:
      - deploymentconfigs
    verbs:
      - list
      - get
      - update
      - patch
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
  - apiGroups:
      - "argoproj.io"
      - ""
    resources:
      - rollouts
    verbs:
      - list
      - get
      - update
      - patch
{{- end }}
  - apiGroups:
      - "extensions"
      - "apps"
    resources:
      - deployments
@@ -35,4 +67,31 @@ rules:
      - get
      - update
      - patch
  - apiGroups:
      - "extensions"
    resources:
      - deployments
      - daemonsets
    verbs:
      - list
      - get
      - update
      - patch
{{- if .Values.reloader.enableHA }}
  - apiGroups:
      - "coordination.k8s.io"
    resources:
      - leases
    verbs:
      - create
      - get
      - update
{{- end}}
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
{{- end }}
@@ -1,8 +1,13 @@
{{- if and .Values.reloader.watchGlobally (.Values.reloader.rbac.enabled) }}
---
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
apiVersion: rbac.authorization.k8s.io/v1
{{ else }}
apiVersion: rbac.authorization.k8s.io/v1beta1
{{- end }}
kind: ClusterRoleBinding
metadata:
  annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
  labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
@@ -11,14 +16,14 @@ metadata:
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
  name: {{ template "reloader-name" . }}-role-binding
  name: {{ template "reloader-fullname" . }}-role-binding
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "reloader-name" . }}-role
  name: {{ template "reloader-fullname" . }}-role
subjects:
  - kind: ServiceAccount
    name: {{ template "serviceAccountName" . }}
    name: {{ template "reloader-serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
{{- end }}
@@ -1,8 +1,9 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
{{- if .Values.reloader.deployment.annotations }}
  annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
{{- if .Values.reloader.deployment.annotations }}
{{ toYaml .Values.reloader.deployment.annotations | indent 4 }}
{{- end }}
  labels:
@@ -13,19 +14,28 @@ metadata:
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
  name: {{ template "reloader-name" . }}
  name: {{ template "reloader-fullname" . }}
  namespace: {{ .Release.Namespace }}
spec:
{{- if not (.Values.reloader.enableHA) }}
  replicas: 1
{{- else }}
  replicas: {{ .Values.reloader.deployment.replicas }}
{{- end}}
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: {{ template "reloader-name" . }}
      app: {{ template "reloader-fullname" . }}
      release: {{ .Release.Name | quote }}
{{- if .Values.reloader.matchLabels }}
{{ toYaml .Values.reloader.matchLabels | indent 6 }}
{{- end }}
  template:
    metadata:
{{- if .Values.reloader.deployment.pod.annotations }}
      annotations:
{{ toYaml .Values.reloader.deployment.pod.annotations | indent 8 }}
{{- end }}
      labels:
{{ include "reloader-labels.chart" . | indent 8 }}
{{- if .Values.reloader.deployment.labels }}
@@ -35,8 +45,30 @@ spec:
{{ toYaml .Values.reloader.matchLabels | indent 8 }}
{{- end }}
    spec:
{{- if .Values.reloader.deployment.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.reloader.deployment.nodeSelector | indent 8 }}
{{- end }}
{{- if or (.Values.reloader.deployment.affinity) (.Values.reloader.enableHA) }}
      affinity:
{{- if .Values.reloader.deployment.affinity }}
{{ toYaml .Values.reloader.deployment.affinity | indent 8 }}
{{- end}}
{{ include "reloader-podAntiAffinity" . | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.tolerations }}
      tolerations:
{{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.priorityClassName }}
      priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
{{- end }}
      containers:
      - env:
      - image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
        imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
        name: {{ template "reloader-fullname" . }}
{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) (.Values.reloader.enableHA)}}
        env:
{{- range $name, $value := .Values.reloader.deployment.env.open }}
{{- if not (empty $value) }}
        - name: {{ $name | quote }}
@@ -67,7 +99,112 @@ spec:
            fieldRef:
              fieldPath: metadata.namespace
{{- end }}
        image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
        imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
        name: {{ template "reloader-name" . }}
      serviceAccountName: {{ template "serviceAccountName" . }}
{{- if .Values.reloader.enableHA }}
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
{{- end }}
{{- end }}

        ports:
        - name: http
          containerPort: 9091
        - name: metrics
          containerPort: 9090
        livenessProbe:
          httpGet:
            path: /live
            port: http
          timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }}
          failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}
          periodSeconds: {{ .Values.reloader.deployment.livenessProbe.periodSeconds | default "10" }}
          successThreshold: {{ .Values.reloader.deployment.livenessProbe.successThreshold | default "1" }}
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: metrics
|
||||
timeoutSeconds: {{ .Values.reloader.deployment.readinessProbe.timeoutSeconds | default "5" }}
|
||||
failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }}
|
||||
periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}
|
||||
successThreshold: {{ .Values.reloader.deployment.readinessProbe.successThreshold | default "1" }}
|
||||
|
||||
{{- with .Values.reloader.deployment.containerSecurityContext }}
|
||||
securityContext: {{ toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
|
||||
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
|
||||
volumeMounts:
|
||||
- mountPath: /tmp/
|
||||
name: tmp-volume
|
||||
{{- end }}
|
||||
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA)}}
|
||||
args:
|
||||
{{- if .Values.reloader.logFormat }}
|
||||
- "--log-format={{ .Values.reloader.logFormat }}"
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.ignoreSecrets }}
|
||||
- "--resources-to-ignore=secrets"
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.ignoreConfigMaps }}
|
||||
- "--resources-to-ignore=configMaps"
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.ignoreNamespaces }}
|
||||
- "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.reloader.custom_annotations }}
|
||||
{{- if .Values.reloader.custom_annotations.configmap }}
|
||||
- "--configmap-annotation"
|
||||
- "{{ .Values.reloader.custom_annotations.configmap }}"
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.custom_annotations.secret }}
|
||||
- "--secret-annotation"
|
||||
- "{{ .Values.reloader.custom_annotations.secret }}"
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.custom_annotations.auto }}
|
||||
- "--auto-annotation"
|
||||
- "{{ .Values.reloader.custom_annotations.auto }}"
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.custom_annotations.search }}
|
||||
- "--auto-search-annotation"
|
||||
- "{{ .Values.reloader.custom_annotations.search }}"
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.custom_annotations.match }}
|
||||
- "--search-match-annotation"
|
||||
- "{{ .Values.reloader.custom_annotations.match }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if eq .Values.reloader.isArgoRollouts true }}
|
||||
- "--is-Argo-Rollouts={{ .Values.reloader.isArgoRollouts }}"
|
||||
{{- end }}
|
||||
{{- if eq .Values.reloader.reloadOnCreate true }}
|
||||
- "--reload-on-create={{ .Values.reloader.reloadOnCreate }}"
|
||||
{{- end }}
|
||||
{{- if ne .Values.reloader.reloadStrategy "default" }}
|
||||
- "--reload-strategy={{ .Values.reloader.reloadStrategy }}"
|
||||
{{- end }}
|
||||
{{- if or (gt .Values.reloader.deployment.replicas 1.0) (.Values.reloader.enableHA) }}
|
||||
- "--enable-ha=true"
|
||||
{{- end}}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.deployment.resources }}
|
||||
resources:
|
||||
{{ toYaml .Values.reloader.deployment.resources | indent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.deployment.securityContext }}
|
||||
securityContext: {{ toYaml .Values.reloader.deployment.securityContext | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "reloader-serviceAccountName" . }}
|
||||
{{- if hasKey .Values.reloader.deployment "automountServiceAccountToken" }}
|
||||
automountServiceAccountToken: {{ .Values.reloader.deployment.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
|
||||
volumes:
|
||||
- emptyDir: {}
|
||||
name: tmp-volume
|
||||
{{- end }}
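To make the args wiring above concrete, here is a sketch of a values override and the container arguments the template would emit for it (field names are taken from this chart's values.yaml; the exact rendering depends on the chart version, and the annotation keys are only examples):

```yaml
# values override (sketch)
reloader:
  logFormat: json
  ignoreSecrets: false
  ignoreConfigMaps: false
  ignoreNamespaces: "kube-system,kube-public"
  reloadOnCreate: true
  reloadStrategy: annotations
  custom_annotations:
    configmap: "my.company.com/configmap"   # example key, not a chart default
    secret: "my.company.com/secret"         # example key, not a chart default

# approximate container args rendered from the template above:
# - "--log-format=json"
# - "--namespaces-to-ignore=kube-system,kube-public"
# - "--configmap-annotation"
# - "my.company.com/configmap"
# - "--secret-annotation"
# - "my.company.com/secret"
# - "--reload-on-create=true"
# - "--reload-strategy=annotations"
```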
|
||||
|
||||
@@ -0,0 +1,31 @@
|
||||
{{- if ( .Values.reloader.podMonitor.enabled ) }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PodMonitor
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.podMonitor.labels }}
|
||||
{{ toYaml .Values.reloader.podMonitor.labels | indent 4}}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-fullname" . }}
|
||||
{{- if .Values.reloader.podMonitor.namespace }}
|
||||
namespace: {{ .Values.reloader.podMonitor.namespace }}
|
||||
{{- end }}
|
||||
spec:
|
||||
podMetricsEndpoints:
|
||||
- port: http
|
||||
path: "/metrics"
|
||||
{{- if .Values.reloader.podMonitor.interval }}
|
||||
interval: {{ .Values.reloader.podMonitor.interval }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.podMonitor.timeout }}
|
||||
scrapeTimeout: {{ .Values.reloader.podMonitor.timeout }}
|
||||
{{- end }}
|
||||
jobLabel: {{ template "reloader-fullname" . }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{ include "reloader-labels.chart" . | nindent 6 }}
|
||||
{{- end }}
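As a sketch, the PodMonitor above can be enabled from values like this (field names mirror this chart's values.yaml; the Prometheus Operator CRDs must already be installed, and the `release` label is only an illustrative scrape-selection label for a typical Prometheus Operator setup):

```yaml
reloader:
  podMonitor:
    enabled: true
    namespace: monitoring      # namespace where the PodMonitor object is created
    interval: 30s              # scrape interval
    timeout: 10s               # scrape timeout
    labels:
      release: kube-prometheus-stack   # illustrative; match your Prometheus Operator selector
```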
|
||||
@@ -1,7 +1,13 @@
|
||||
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
|
||||
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
{{ else }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
{{- end }}
|
||||
kind: Role
|
||||
metadata:
|
||||
annotations:
|
||||
{{ include "reloader-helm3.annotations" . | indent 4 }}
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.rbac.labels }}
|
||||
@@ -10,20 +16,47 @@ metadata:
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-name" . }}-role
|
||||
name: {{ template "reloader-fullname" . }}-role
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
resources:
|
||||
{{- if .Values.reloader.ignoreSecrets }}{{- else }}
|
||||
- secrets
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.ignoreConfigMaps }}{{- else }}
|
||||
- configmaps
|
||||
{{- end }}
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
|
||||
- apiGroups:
|
||||
- "apps.openshift.io"
|
||||
- ""
|
||||
resources:
|
||||
- deploymentconfigs
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
{{- end }}
|
||||
{{- if and (.Capabilities.APIVersions.Has "argoproj.io/v1alpha1") (.Values.reloader.isArgoRollouts) }}
|
||||
- apiGroups:
|
||||
- "argoproj.io"
|
||||
- ""
|
||||
resources:
|
||||
- rollouts
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
{{- end }}
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
@@ -34,4 +67,31 @@ rules:
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
{{- if .Values.reloader.enableHA }}
|
||||
- apiGroups:
|
||||
- "coordination.k8s.io"
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- update
|
||||
{{- end}}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
{{- end }}
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
{{- if and (not (.Values.reloader.watchGlobally)) (.Values.reloader.rbac.enabled) }}
|
||||
---
|
||||
{{- if (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
{{ else }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
{{- end }}
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
annotations:
|
||||
{{ include "reloader-helm3.annotations" . | indent 4 }}
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.rbac.labels }}
|
||||
{{ toYaml .Values.reloader.rbac.labels | indent 4 }}
|
||||
@@ -11,14 +16,14 @@ metadata:
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-name" . }}-role-binding
|
||||
name: {{ template "reloader-fullname" . }}-role-binding
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ template "reloader-name" . }}-role
|
||||
name: {{ template "reloader-fullname" . }}-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "serviceAccountName" . }}
|
||||
name: {{ template "reloader-serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
|
||||
deployments/kubernetes/chart/reloader/templates/secret.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
{{- if .Values.reloader.deployment.env.secret -}}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ template "reloader-fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: Opaque
|
||||
data:
|
||||
{{ if .Values.reloader.deployment.env.secret.ALERT_ON_RELOAD -}}
|
||||
ALERT_ON_RELOAD: {{ .Values.reloader.deployment.env.secret.ALERT_ON_RELOAD | b64enc | quote }}
|
||||
{{ end }}
|
||||
{{- if .Values.reloader.deployment.env.secret.ALERT_SINK -}}
|
||||
ALERT_SINK: {{ .Values.reloader.deployment.env.secret.ALERT_SINK | b64enc | quote }}
|
||||
{{ end }}
|
||||
{{- if .Values.reloader.deployment.env.secret.ALERT_WEBHOOK_URL -}}
|
||||
ALERT_WEBHOOK_URL: {{ .Values.reloader.deployment.env.secret.ALERT_WEBHOOK_URL | b64enc | quote }}
|
||||
{{ end }}
|
||||
{{- if .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO -}}
|
||||
ALERT_ADDITIONAL_INFO: {{ .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO | b64enc | quote }}
|
||||
{{ end }}
|
||||
{{ end }}
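For illustration, with `ALERT_ON_RELOAD`, `ALERT_SINK` and `ALERT_WEBHOOK_URL` set in values, the template above would render roughly the following Secret for a release named `reloader` (the base64 strings encode the example inputs; the webhook URL is a placeholder):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: reloader-reloader        # {{ template "reloader-fullname" . }} for a release named "reloader"
  namespace: default
type: Opaque
data:
  ALERT_ON_RELOAD: "dHJ1ZQ=="    # base64 of "true"
  ALERT_SINK: "c2xhY2s="         # base64 of "slack"
  ALERT_WEBHOOK_URL: "aHR0cHM6Ly9leGFtcGxlLmNvbS9ob29r"   # base64 of "https://example.com/hook" (placeholder)
```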
|
||||
deployments/kubernetes/chart/reloader/templates/service.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
{{- if .Values.reloader.service }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
annotations:
|
||||
{{ include "reloader-helm3.annotations" . | indent 4 }}
|
||||
{{- if .Values.reloader.service.annotations }}
|
||||
{{ toYaml .Values.reloader.service.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.service.labels }}
|
||||
{{ toYaml .Values.reloader.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
selector:
|
||||
{{- if .Values.reloader.deployment.labels }}
|
||||
{{ toYaml .Values.reloader.deployment.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- port: {{ .Values.reloader.service.port }}
|
||||
name: http
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
{{- end }}
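Since `reloader.service` is an empty map by default, the Service above is only rendered when you populate it. A sketch of a values fragment, using the port value that values.yaml suggests in its comments:

```yaml
reloader:
  service:
    port: 9090          # Service port; traffic is routed to the container port named "http"
    labels: {}
    annotations: {}
```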
|
||||
@@ -1,7 +1,18 @@
|
||||
{{- if .Values.reloader.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
{{- if .Values.global.imagePullSecrets }}
|
||||
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- if hasKey .Values.reloader.serviceAccount "automountServiceAccountToken" }}
|
||||
automountServiceAccountToken: {{ .Values.reloader.serviceAccount.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
metadata:
|
||||
annotations:
|
||||
{{ include "reloader-helm3.annotations" . | indent 4 }}
|
||||
{{- if .Values.reloader.serviceAccount.annotations }}
|
||||
{{ toYaml .Values.reloader.serviceAccount.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.serviceAccount.labels }}
|
||||
@@ -10,5 +21,6 @@ metadata:
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "serviceAccountName" . }}
|
||||
name: {{ template "reloader-serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
|
||||
@@ -0,0 +1,31 @@
|
||||
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.serviceMonitor.enabled ) }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.serviceMonitor.labels }}
|
||||
{{ toYaml .Values.reloader.serviceMonitor.labels | indent 4}}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-fullname" . }}
|
||||
{{- if .Values.reloader.serviceMonitor.namespace }}
|
||||
namespace: {{ .Values.reloader.serviceMonitor.namespace }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
- targetPort: http
|
||||
path: "/metrics"
|
||||
{{- if .Values.reloader.serviceMonitor.interval }}
|
||||
interval: {{ .Values.reloader.serviceMonitor.interval }}
|
||||
{{- end }}
|
||||
{{- if .Values.reloader.serviceMonitor.timeout }}
|
||||
scrapeTimeout: {{ .Values.reloader.serviceMonitor.timeout }}
|
||||
{{- end }}
|
||||
jobLabel: {{ template "reloader-fullname" . }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{ include "reloader-labels.chart" . | nindent 6 }}
|
||||
{{- end }}
|
||||
deployments/kubernetes/chart/reloader/values.schema.json (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/schema#",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"reloader": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"reloadStrategy": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"default",
|
||||
"env-vars",
|
||||
"annotations"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
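The schema above only validates `reloader.reloadStrategy`. As a sketch, the following values fragment passes validation, while any value outside the enum would make `helm install`/`helm upgrade` fail schema validation:

```yaml
reloader:
  reloadStrategy: annotations   # must be one of: default, env-vars, annotations
```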
|
||||
@@ -1,20 +1,74 @@
|
||||
# Generated from deployments/kubernetes/templates/chart/values.yaml.tmpl
|
||||
global:
|
||||
## Reference to one or more secrets to be used when pulling images
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
##
|
||||
imagePullSecrets: []
|
||||
|
||||
kubernetes:
|
||||
host: https://kubernetes.default
|
||||
|
||||
reloader:
|
||||
isArgoRollouts: false
|
||||
isOpenshift: false
|
||||
ignoreSecrets: false
|
||||
ignoreConfigMaps: false
|
||||
reloadOnCreate: false
|
||||
reloadStrategy: default # Set to default, env-vars or annotations
|
||||
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
|
||||
logFormat: "" #json
|
||||
watchGlobally: true
|
||||
# Set to true to enable leadership election allowing you to run multiple replicas
|
||||
enableHA: false
|
||||
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
|
||||
readOnlyRootFileSystem: false
|
||||
legacy:
|
||||
rbac: false
|
||||
matchLabels: {}
|
||||
deployment:
|
||||
# If you wish to run multiple replicas set reloader.enableHA = true
|
||||
replicas: 1
|
||||
nodeSelector:
|
||||
# cloud.google.com/gke-nodepool: default-pool
|
||||
|
||||
# An affinity stanza to be applied to the Deployment.
|
||||
# Example:
|
||||
# affinity:
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: "node-role.kubernetes.io/infra-worker"
|
||||
# operator: "Exists"
|
||||
affinity: {}
|
||||
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
|
||||
containerSecurityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# allowPrivilegeEscalation: false
|
||||
# readOnlyRootFilesystem: true
|
||||
|
||||
# A list of tolerations to be applied to the Deployment.
|
||||
# Example:
|
||||
# tolerations:
|
||||
# - key: "node-role.kubernetes.io/infra-worker"
|
||||
# operator: "Exists"
|
||||
# effect: "NoSchedule"
|
||||
tolerations: []
|
||||
|
||||
annotations: {}
|
||||
labels:
|
||||
provider: stakater
|
||||
group: com.stakater.platform
|
||||
version: 0.0.20
|
||||
version: v0.0.123
|
||||
image:
|
||||
name: stakater/reloader
|
||||
tag: "0.0.20"
|
||||
tag: v0.0.123
|
||||
pullPolicy: IfNotPresent
|
||||
# Support for extra environment variables.
|
||||
env:
|
||||
@@ -22,8 +76,44 @@ reloader:
|
||||
open:
|
||||
# secret supports Key value pair as environment variables. It gets the values based on keys from default reloader secret if any.
|
||||
secret:
|
||||
# ALERT_ON_RELOAD: <"true"|"false">
|
||||
# ALERT_SINK: <"slack"> # By default it will be a raw text based webhook
|
||||
# ALERT_WEBHOOK_URL: <"webhook_url">
|
||||
# ALERT_ADDITIONAL_INFO: <"Additional Info like Cluster Name if needed">
|
||||
# field supports Key value pair as environment variables. It gets the values from other fields of pod.
|
||||
field:
|
||||
|
||||
# Liveness and readiness probe timeout values.
|
||||
livenessProbe: {}
|
||||
# timeoutSeconds: 5
|
||||
# failureThreshold: 5
|
||||
# periodSeconds: 10
|
||||
# successThreshold: 1
|
||||
readinessProbe: {}
|
||||
# timeoutSeconds: 15
|
||||
# failureThreshold: 5
|
||||
# periodSeconds: 10
|
||||
# successThreshold: 1
|
||||
|
||||
# Specify resource requests/limits for the deployment.
|
||||
# Example:
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: "100m"
|
||||
# memory: "512Mi"
|
||||
# requests:
|
||||
# cpu: "10m"
|
||||
# memory: "128Mi"
|
||||
resources: {}
|
||||
pod:
|
||||
annotations: {}
|
||||
priorityClassName: ""
|
||||
|
||||
service: {}
|
||||
# labels: {}
|
||||
# annotations: {}
|
||||
# port: 9090
|
||||
|
||||
rbac:
|
||||
enabled: true
|
||||
labels: {}
|
||||
@@ -32,6 +122,37 @@ reloader:
|
||||
# Specifies whether a ServiceAccount should be created
|
||||
create: true
|
||||
labels: {}
|
||||
annotations: {}
|
||||
# The name of the ServiceAccount to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: reloader
|
||||
name:
|
||||
# Optional flags to pass to the Reloader entrypoint
|
||||
# Example:
|
||||
# custom_annotations:
|
||||
# configmap: "my.company.com/configmap"
|
||||
# secret: "my.company.com/secret"
|
||||
custom_annotations: {}
|
||||
|
||||
serviceMonitor:
|
||||
# Deprecated: Service monitor will be removed in future releases of reloader in favour of Pod monitor
|
||||
# Enabling this requires service to be enabled as well, or no endpoints will be found
|
||||
enabled: false
|
||||
# Set the namespace the ServiceMonitor should be deployed
|
||||
# namespace: monitoring
|
||||
# Set how frequently Prometheus should scrape
|
||||
# interval: 30s
|
||||
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
|
||||
# labels:
|
||||
# Set timeout for scrape
|
||||
# timeout: 10s
|
||||
|
||||
podMonitor:
|
||||
enabled: false
|
||||
# Set the namespace the podMonitor should be deployed
|
||||
# namespace: monitoring
|
||||
# Set how frequently Prometheus should scrape
|
||||
# interval: 30s
|
||||
# Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
|
||||
# labels:
|
||||
# Set timeout for scrape
|
||||
# timeout: 10s
|
||||
|
||||
deployments/kubernetes/kustomization.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- manifests/clusterrole.yaml
|
||||
- manifests/clusterrolebinding.yaml
|
||||
- manifests/role.yaml
|
||||
- manifests/rolebinding.yaml
|
||||
- manifests/serviceaccount.yaml
|
||||
- manifests/deployment.yaml
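The kustomization above simply lists the raw manifests. As a sketch (the namespace and relative path are illustrative and depend on your repository layout), an overlay could consume it as a base:

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: reloader                # illustrative target namespace for all resources
resources:
  - ../../deployments/kubernetes   # relative path to the kustomization above; adjust to your layout
```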
|
||||
@@ -1,20 +1,24 @@
|
||||
---
|
||||
# Source: reloader/templates/clusterrole.yaml
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
meta.helm.sh/release-namespace: "default"
|
||||
meta.helm.sh/release-name: "reloader"
|
||||
labels:
|
||||
app: reloader
|
||||
chart: "reloader-0.0.20"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role
|
||||
app: reloader-reloader
|
||||
chart: "reloader-v0.0.123"
|
||||
release: "reloader"
|
||||
heritage: "Helm"
|
||||
app.kubernetes.io/managed-by: "Helm"
|
||||
name: reloader-reloader-role
|
||||
namespace: default
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
@@ -22,7 +26,6 @@ rules:
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
@@ -33,4 +36,20 @@ rules:
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
|
||||
@@ -1,23 +1,25 @@
|
||||
---
|
||||
# Source: reloader/templates/clusterrolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
annotations:
|
||||
meta.helm.sh/release-namespace: "default"
|
||||
meta.helm.sh/release-name: "reloader"
|
||||
labels:
|
||||
app: reloader
|
||||
chart: "reloader-0.0.20"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role-binding
|
||||
app: reloader-reloader
|
||||
chart: "reloader-v0.0.123"
|
||||
release: "reloader"
|
||||
heritage: "Helm"
|
||||
app.kubernetes.io/managed-by: "Helm"
|
||||
name: reloader-reloader-role-binding
|
||||
namespace: default
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: reloader-role
|
||||
name: reloader-reloader-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: reloader
|
||||
name: reloader-reloader
|
||||
namespace: default
|
||||
|
||||
|
||||
@@ -1,41 +1,68 @@
|
||||
---
|
||||
# Source: reloader/templates/deployment.yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations:
|
||||
meta.helm.sh/release-namespace: "default"
|
||||
meta.helm.sh/release-name: "reloader"
|
||||
labels:
|
||||
app: reloader
|
||||
chart: "reloader-0.0.20"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
app: reloader-reloader
|
||||
chart: "reloader-v0.0.123"
|
||||
release: "reloader"
|
||||
heritage: "Helm"
|
||||
app.kubernetes.io/managed-by: "Helm"
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.20
|
||||
|
||||
name: reloader
|
||||
version: v0.0.123
|
||||
name: reloader-reloader
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: reloader
|
||||
release: "RELEASE-NAME"
|
||||
app: reloader-reloader
|
||||
release: "reloader"
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
chart: "reloader-0.0.20"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
app: reloader-reloader
|
||||
chart: "reloader-v0.0.123"
|
||||
release: "reloader"
|
||||
heritage: "Helm"
|
||||
app.kubernetes.io/managed-by: "Helm"
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.20
|
||||
|
||||
version: v0.0.123
|
||||
spec:
|
||||
containers:
|
||||
- env:
|
||||
image: "stakater/reloader:0.0.20"
|
||||
- image: "stakater/reloader:v0.0.123"
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: reloader
|
||||
serviceAccountName: reloader
|
||||
name: reloader-reloader
|
||||
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 9091
|
||||
- name: metrics
|
||||
containerPort: 9090
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /live
|
||||
port: http
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 5
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: metrics
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 5
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
serviceAccountName: reloader-reloader
|
||||
|
||||
deployments/kubernetes/manifests/podmonitor.yaml (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
---
|
||||
# Source: reloader/templates/podmonitor.yaml
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
---
|
||||
# Source: reloader/templates/rbac.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.18
|
||||
chart: "reloader-0.0.18"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.18
|
||||
chart: "reloader-0.0.18"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role
|
||||
namespace: default
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
- "extensions"
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
- statefulsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.18
|
||||
chart: "reloader-0.0.18"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role-binding
|
||||
namespace: default
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: reloader-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: reloader
|
||||
namespace: default
|
||||
deployments/kubernetes/manifests/service.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
---
|
||||
# Source: reloader/templates/service.yaml
|
||||
|
||||
|
||||
@@ -1,13 +1,16 @@
|
||||
---
|
||||
# Source: reloader/templates/serviceaccount.yaml
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
annotations:
|
||||
meta.helm.sh/release-namespace: "default"
|
||||
meta.helm.sh/release-name: "reloader"
|
||||
labels:
|
||||
app: reloader
|
||||
chart: "reloader-0.0.20"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader
|
||||
|
||||
app: reloader-reloader
|
||||
chart: "reloader-v0.0.123"
|
||||
release: "reloader"
|
||||
heritage: "Helm"
|
||||
app.kubernetes.io/managed-by: "Helm"
|
||||
name: reloader-reloader
|
||||
namespace: default
|
||||
|
||||
deployments/kubernetes/manifests/servicemonitor.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
---
|
||||
# Source: reloader/templates/servicemonitor.yaml
|
||||
|
||||
|
||||
@@ -1,194 +1,164 @@
|
||||
---
|
||||
# Source: reloader/templates/role.yaml
|
||||
# Source: reloader/templates/serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
annotations:
|
||||
meta.helm.sh/release-namespace: "default"
|
||||
meta.helm.sh/release-name: "reloader"
|
||||
labels:
|
||||
app: reloader-reloader
|
||||
chart: "reloader-v0.0.123"
|
||||
release: "reloader"
|
||||
heritage: "Helm"
|
||||
app.kubernetes.io/managed-by: "Helm"
|
||||
name: reloader-reloader
|
||||
namespace: default
|
||||
---
|
||||
# Source: reloader/templates/clusterrole.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
meta.helm.sh/release-namespace: "default"
|
||||
meta.helm.sh/release-name: "reloader"
|
||||
labels:
|
||||
app: reloader-reloader
|
||||
chart: "reloader-v0.0.123"
|
||||
release: "reloader"
|
||||
heritage: "Helm"
|
||||
app.kubernetes.io/managed-by: "Helm"
|
||||
name: reloader-reloader-role
|
||||
namespace: default
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
- statefulsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
---
|
||||
# Source: reloader/templates/clusterrolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
annotations:
|
||||
meta.helm.sh/release-namespace: "default"
|
||||
meta.helm.sh/release-name: "reloader"
|
||||
labels:
|
||||
app: reloader-reloader
|
||||
chart: "reloader-v0.0.123"
|
||||
release: "reloader"
|
||||
heritage: "Helm"
|
||||
app.kubernetes.io/managed-by: "Helm"
|
||||
name: reloader-reloader-role-binding
|
||||
namespace: default
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: reloader-reloader-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: reloader-reloader
|
||||
namespace: default
|
||||
---
|
||||
# Source: reloader/templates/deployment.yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations:
|
||||
meta.helm.sh/release-namespace: "default"
|
||||
meta.helm.sh/release-name: "reloader"
|
||||
labels:
|
||||
app: reloader
|
||||
chart: "reloader-0.0.20"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
app: reloader-reloader
|
||||
chart: "reloader-v0.0.123"
|
||||
release: "reloader"
|
||||
heritage: "Helm"
|
||||
app.kubernetes.io/managed-by: "Helm"
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.20
|
||||
|
||||
name: reloader
|
||||
version: v0.0.123
|
||||
name: reloader-reloader
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: reloader
|
||||
release: "RELEASE-NAME"
|
||||
app: reloader-reloader
|
||||
release: "reloader"
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
chart: "reloader-0.0.20"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
app: reloader-reloader
|
||||
chart: "reloader-v0.0.123"
|
||||
release: "reloader"
|
||||
heritage: "Helm"
|
||||
app.kubernetes.io/managed-by: "Helm"
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.20
|
||||
|
||||
version: v0.0.123
|
||||
spec:
|
||||
containers:
|
||||
- env:
|
||||
image: "stakater/reloader:0.0.20"
|
||||
- image: "stakater/reloader:v0.0.123"
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: reloader
|
||||
serviceAccountName: reloader
|
||||
|
||||
---
|
||||
# Source: reloader/templates/clusterrole.yaml
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
chart: "reloader-0.0.20"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role
|
||||
namespace: default
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
- statefulsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
|
||||
---
|
||||
# Source: reloader/templates/rolebinding.yaml
|
||||
|
||||
|
||||
---
|
||||
# Source: reloader/templates/rbac.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.18
|
||||
chart: "reloader-0.0.18"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.18
|
||||
chart: "reloader-0.0.18"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role
|
||||
namespace: default
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
- "extensions"
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- daemonsets
|
||||
- statefulsets
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
group: com.stakater.platform
|
||||
provider: stakater
|
||||
version: 0.0.18
|
||||
chart: "reloader-0.0.18"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role-binding
|
||||
namespace: default
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: reloader-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: reloader
|
||||
namespace: default
|
||||
---
|
||||
# Source: reloader/templates/clusterrolebinding.yaml
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
chart: "reloader-0.0.20"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader-role-binding
|
||||
namespace: default
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: reloader-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: reloader
|
||||
namespace: default
|
||||
|
||||
---
|
||||
# Source: reloader/templates/serviceaccount.yaml
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app: reloader
|
||||
chart: "reloader-0.0.20"
|
||||
release: "RELEASE-NAME"
|
||||
heritage: "Tiller"
|
||||
name: reloader
|
||||
name: reloader-reloader
|
||||
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 9091
|
||||
- name: metrics
|
||||
containerPort: 9090
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /live
|
||||
port: http
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 5
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: metrics
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 5
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
serviceAccountName: reloader-reloader
|
||||
|
||||
@@ -1,12 +1,63 @@
|
||||
# Generated from deployments/kubernetes/templates/chart/values.yaml.tmpl
|
||||
global:
|
||||
## Reference to one or more secrets to be used when pulling images
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
##
|
||||
imagePullSecrets: []
|
||||
|
||||
kubernetes:
|
||||
host: https://kubernetes.default
|
||||
|
||||
reloader:
|
||||
isArgoRollouts: false
|
||||
isOpenshift: false
|
||||
ignoreSecrets: false
|
||||
ignoreConfigMaps: false
|
||||
reloadOnCreate: false
|
||||
reloadStrategy: default # Set to default, env-vars or annotations
|
||||
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
|
||||
logFormat: "" #json
|
||||
watchGlobally: true
|
||||
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
|
||||
readOnlyRootFileSystem: false
|
||||
legacy:
|
||||
rbac: false
|
||||
matchLabels: {}
|
||||
deployment:
|
||||
replicas: 1
|
||||
nodeSelector:
|
||||
# cloud.google.com/gke-nodepool: default-pool
|
||||
|
||||
# An affinity stanza to be applied to the Deployment.
|
||||
# Example:
|
||||
# affinity:
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: "node-role.kubernetes.io/infra-worker"
|
||||
# operator: "Exists"
|
||||
affinity: {}
|
||||
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
|
||||
containerSecurityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# allowPrivilegeEscalation: false
|
||||
# readOnlyRootFilesystem: true
|
||||
|
||||
# A list of tolerations to be applied to the Deployment.
|
||||
# Example:
|
||||
# tolerations:
|
||||
# - key: "node-role.kubernetes.io/infra-worker"
|
||||
# operator: "Exists"
|
||||
# effect: "NoSchedule"
|
||||
tolerations: []
|
||||
|
||||
annotations: {}
|
||||
labels:
|
||||
provider: stakater
|
||||
@@ -24,6 +75,25 @@ reloader:
|
||||
secret:
|
||||
# field supports Key value pair as environment variables. It gets the values from other fields of pod.
|
||||
field:
|
||||
|
||||
# Specify resource requests/limits for the deployment.
|
||||
# Example:
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: "100m"
|
||||
# memory: "512Mi"
|
||||
# requests:
|
||||
# cpu: "10m"
|
||||
# memory: "128Mi"
|
||||
resources: {}
|
||||
pod:
|
||||
annotations: {}
|
||||
|
||||
service: {}
|
||||
# labels: {}
|
||||
# annotations: {}
|
||||
# port: 9090
|
||||
|
||||
rbac:
|
||||
enabled: true
|
||||
labels: {}
|
||||
@@ -32,6 +102,37 @@ reloader:
|
||||
# Specifies whether a ServiceAccount should be created
|
||||
create: true
|
||||
labels: {}
|
||||
annotations: {}
|
||||
# The name of the ServiceAccount to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: reloader
|
||||
name:
|
||||
# Optional flags to pass to the Reloader entrypoint
|
||||
# Example:
|
||||
# custom_annotations:
|
||||
# configmap: "my.company.com/configmap"
|
||||
# secret: "my.company.com/secret"
|
||||
custom_annotations: {}
|
||||
serviceMonitor:
|
||||
# enabling this requires service to be enabled as well, or no endpoints will be found
|
||||
enabled: false
|
||||
# Set the namespace the ServiceMonitor should be deployed
|
||||
# namespace: monitoring
|
||||
# Set how frequently Prometheus should scrape
|
||||
# interval: 30s
|
||||
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
|
||||
# labels:
|
||||
# Set timeout for scrape
|
||||
# timeout: 10s
|
||||
|
||||
|
||||
podMonitor:
|
||||
# enabling this requires service to be enabled as well, or no endpoints will be found
|
||||
enabled: false
|
||||
# Set the namespace the podMonitor should be deployed
|
||||
# namespace: monitoring
|
||||
# Set how frequently Prometheus should scrape
|
||||
# interval: 30s
|
||||
# Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
|
||||
# labels:
|
||||
# Set timeout for scrape
|
||||
# timeout: 10s
|
||||
|
||||
docs/Alerting.md (new file, 12 lines)
@@ -0,0 +1,12 @@
# Alerting on Reload

Reloader can alert when it triggers a rolling upgrade on Deployments or StatefulSets. A webhook notification alert is sent to the configured webhook server with all the required information.

#### Enabling the feature

To enable this feature, update the `reloader.deployment.env.secret` section of values.yaml with the information needed for the alert:
<pre> ALERT_ON_RELOAD: [ true/false ] Default: false
 ALERT_SINK: [ slack/webhook ] Default: webhook
 ALERT_WEBHOOK_URL: Required if ALERT_ON_RELOAD is true
 ALERT_ADDITIONAL_INFO: Any additional information to be added to the alert</pre>

#### Slack incoming-webhook creation docs

https://api.slack.com/messaging/webhooks
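As a sketch, the corresponding values.yaml fragment would look like this (the webhook URL and additional info are placeholders):

```yaml
reloader:
  deployment:
    env:
      secret:
        ALERT_ON_RELOAD: "true"
        ALERT_SINK: "slack"
        ALERT_WEBHOOK_URL: "https://hooks.slack.com/services/XXX/YYY/ZZZ"   # placeholder
        ALERT_ADDITIONAL_INFO: "cluster: prod-eu-west-1"                     # optional free text
```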
|
||||
docs/Container Build.md (new file, 41 lines)
@@ -0,0 +1,41 @@
|
||||
|
||||
# Container Build
|
||||
> **WARNING:** As a user of Reloader you do not need to build containers; they are freely available here: https://hub.docker.com/r/stakater/reloader/
|
||||
|
||||
The multi-architecture approach is based on original work by @mdh02038: https://github.com/mdh02038/Reloader
|
||||
|
||||
Images tested on linux/arm, linux/arm64 and linux/amd64.
|
||||
|
||||
# Install Pre-Reqs
|
||||
The build environment requires the following packages (tested on Ubuntu 20.04):
|
||||
* golang
|
||||
* make
|
||||
* qemu (for arm, arm64 etc. emulation)
|
||||
* binfmt-support
|
||||
* Docker engine
|
||||
|
||||
## Docker
|
||||
Follow instructions here: https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository
|
||||
|
||||
Once installed, enable the experimental CLI:
|
||||
```
|
||||
export DOCKER_CLI_EXPERIMENTAL=enabled
|
||||
```
|
||||
Log in to enable publishing of packages:
|
||||
```
|
||||
sudo docker login
|
||||
```
|
||||
## Remaining Pre-Reqs
|
||||
Remaining Pre-Reqs can be installed via:
|
||||
```
|
||||
sudo apt install golang make qemu-user-static binfmt-support -y
|
||||
```
|
||||
|
||||
# Publish Multi-Architecture Image
|
||||
To build/publish multi-arch Docker images, clone the repository and execute from the repository root:
|
||||
```
|
||||
sudo make release-all
|
||||
```
|
||||
|
||||
# Additional Links/ Info
|
||||
* https://medium.com/@artur.klauser/building-multi-architecture-docker-images-with-buildx-27d80f7e2408
|
||||
docs/Helm2-to-Helm3.md (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
# Helm2 to Helm3 Migration
|
||||
|
||||
Follow the instructions below to migrate Reloader from Helm2 to Helm3.
|
||||
|
||||
## Instructions
|
||||
|
||||
There are 3 steps involved in migrating the reloader from Helm2 to Helm3.
|
||||
|
||||
### Step 1:
|
||||
Install the helm-2to3 plugin
|
||||
|
||||
```bash
|
||||
helm3 plugin install https://github.com/helm/helm-2to3
|
||||
|
||||
helm3 2to3 convert <release-name>
|
||||
|
||||
helm3 2to3 cleanup --release-cleanup --skip-confirmation
|
||||
```
|
||||
|
||||
### Step 2:
|
||||
Add the following Helm3 labels and annotations on reloader resources.
|
||||
|
||||
Label:
|
||||
|
||||
```yaml
|
||||
app.kubernetes.io/managed-by=Helm
|
||||
```
|
||||
Annotations:
|
||||
```yaml
|
||||
meta.helm.sh/release-name=<release-name>
|
||||
meta.helm.sh/release-namespace=<namespace>
|
||||
```
|
||||
|
||||
For example, to label and annotate the ClusterRoleBinding and ClusterRole:
|
||||
|
||||
```bash
|
||||
KIND=ClusterRoleBinding
|
||||
NAME=reloader-reloader-role-binding
|
||||
RELEASE=reloader
|
||||
NAMESPACE=kube-system
|
||||
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
|
||||
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
|
||||
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm
|
||||
|
||||
KIND=ClusterRole
|
||||
NAME=reloader-reloader-role
|
||||
RELEASE=reloader
|
||||
NAMESPACE=kube-system
|
||||
kubectl annotate $KIND $NAME meta.helm.sh/release-name=$RELEASE
|
||||
kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
|
||||
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm
|
||||
```
|
||||
|
||||
### Step 3:
|
||||
Upgrade to desired version
|
||||
```bash
|
||||
helm3 repo add stakater https://stakater.github.io/stakater-charts
|
||||
|
||||
helm3 repo update
|
||||
|
||||
helm3 upgrade <release-name> stakater/reloader --version=v0.0.72
|
||||
```
|
||||
@@ -1,6 +1,6 @@
|
||||
# How it works?
|
||||
|
||||
Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in data of these objects. After change detection reloader performs rolling upgrade on relevant Pods via associated `Deployment`, `Deamonset` and `Statefulset`.
|
||||
Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in data of these objects. After change detection reloader performs rolling upgrade on relevant Pods via associated `Deployment`, `Daemonset` and `Statefulset`.
|
||||
|
||||
## How change detection works
|
||||
|
||||
@@ -17,25 +17,27 @@ The annotation value is comma separated list of `configmaps` or `secrets`. If a
|
||||
|
||||
### Annotation for Configmap
|
||||
|
||||
For a `Deployment` called `foo` have a `ConfigMap` called `foo`. Then add this annotation to your `Deployment`
|
||||
For a `Deployment` called `foo` that has a `ConfigMap` called `foo`, add this annotation* to your `Deployment`:
|
||||
|
||||
```yaml
|
||||
metadata:
|
||||
annotations:
|
||||
configmap.reloader.stakater.com/reload: "foo"
|
||||
```
|
||||
<small>*the default annotation can be changed with the `--configmap-annotation` flag</small>
|
||||
|
||||
### Annotation for Secret
|
||||
|
||||
For a `Deployment` called `foo` have a `Secret` called `foo`. Then add this annotation to your `Deployment`
|
||||
For a `Deployment` called `foo` that has a `Secret` called `foo`, add this annotation* to your `Deployment`:
|
||||
|
||||
```yaml
|
||||
metadata:
|
||||
annotations:
|
||||
secret.reloader.stakater.com/reload: "foo"
|
||||
```
|
||||
<small>*the default annotation can be changed with the `--secret-annotation` flag</small>
|
||||
|
||||
Above mentioned annotation are also work for `Daemonsets` and `Statefulsets`
|
||||
The above-mentioned annotations also work for `Daemonsets`, `Statefulsets` and `Rollouts`; a sketch of a complete example follows below.
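For instance, a minimal sketch of a Deployment wired to reload when the `foo` ConfigMap changes (the image and names are illustrative):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: foo
  annotations:
    configmap.reloader.stakater.com/reload: "foo"
spec:
  selector:
    matchLabels:
      app: foo
  template:
    metadata:
      labels:
        app: foo
    spec:
      containers:
        - name: foo
          image: nginx:1.25        # illustrative image
          envFrom:
            - configMapRef:
                name: foo           # the ConfigMap whose changes trigger the reload
```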
|
||||
|
||||
## How Rolling upgrade works?
|
||||
|
||||
|
||||
@@ -8,4 +8,5 @@ Reloader is inspired from [Configmapcontroller](https://github.com/fabric8io/con
|
||||
| Reloader can watch both `secrets` and `configmaps`. | ConfigmapController can only watch changes in `configmaps`. It cannot detect changes in other resources like `secrets`. |
|
||||
| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | ConfigmapController can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` |
|
||||
| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in configmap controller. It add difficulties for any additional updates in configmap controller and one can not know for sure whether new changes breaks any old functionality or not. |
|
||||
| Reloader uses SHA1 to encode the change in configmap or secret. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less pron to collision. | Configmap controller uses `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
|
||||
| Reloader uses SHA1 to encode the change in a configmap or secret. It then saves the SHA1 value in a `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40-character encoded value that is much less prone to collision. | Configmap controller uses the `FABRICB_FOO_REVISION` environment variable to store any change in configmap controller. It does not encode it or convert it into a suitable hash value to avoid data pollution in the deployment. |
|
||||
| Reloader allows you to customize your own annotation (for both Secrets and Configmaps) using command line flags | Configmap controller restricts you to only their provided annotation |
|
||||
|
||||
@@ -25,4 +25,16 @@ Reloader supports deployment rollout as well as daemonsets and statefulsets roll
|
||||
k8s-trigger-controller stores the hash value in an annotation `trigger.k8s.io/[secret|configMap]-NAME-last-hash`
|
||||
|
||||
#### Reloader:
|
||||
Reloader stores the hash value in an environment variable `STAKATER_NAME_[SECRET|CONFIGMAP]`
|
||||
Reloader stores the hash value in an environment variable `STAKATER_NAME_[SECRET|CONFIGMAP]`
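Concretely, with the environment-variable based approach described above, after a change to a ConfigMap named `foo` the owning workload's container spec ends up with something like the following (the SHA-1 value is illustrative):

```yaml
containers:
  - name: foo
    env:
      - name: STAKATER_FOO_CONFIGMAP
        value: "2f7b9d6c0a1e4f3b8c5d9e0a1b2c3d4e5f6a7b8c"   # SHA-1 of the new ConfigMap data (illustrative)
```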
|
||||
|
||||
### Customization
|
||||
|
||||
#### k8s-trigger-controller:
|
||||
k8s-trigger-controller restricts you to using the `trigger.k8s.io/[secret-configMap]-NAME-last-hash` annotation
|
||||
|
||||
#### Reloader:
|
||||
Reloader allows you to customize the annotation to fit your needs with command line flags:
|
||||
|
||||
- `--auto-annotation <annotation>`
|
||||
- `--configmap-annotation <annotation>`
|
||||
- `--secret-annotation <annotation>`
|
||||
docs/Reloader-with-Sealed-Secrets.md (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
Below are the steps to use Reloader with Sealed Secrets.
1. Download and install the kubeseal client from [here](https://github.com/bitnami-labs/sealed-secrets).
2. Install the controller for Sealed Secrets.
3. Fetch the encryption certificate.
4. Encrypt the secret.
5. Apply the secret.
6. Install the tool which uses that sealed secret.
7. Install Reloader.
8. Once everything is set up, update the original secret on the client and encrypt it with kubeseal to see Reloader working.
9. Apply the updated sealed secret.
10. Reloader will restart the pod to use the updated secret.
|
||||
@@ -1,6 +1,6 @@
|
||||
# Verify Reloader's Working
|
||||
|
||||
Reloader's working can be verified by two ways.
|
||||
Reloader's working can be verified in three ways.
|
||||
|
||||
## Verify from logs
|
||||
|
||||
@@ -49,3 +49,13 @@ After a change in `secret` or `configmap`. Run the below mentioned command and v
|
||||
```bash
|
||||
kubectl get pods <pod name> -n <namespace name>
|
||||
```
|
||||
|
||||
## Verify from metrics
|
||||
Some metrics are exported to prometheus endpoint `/metrics` on port `9090`.
|
||||
|
||||
When reloader is unable to reload, `reloader_reload_executed_total{success="false"}` metric gets incremented and when it reloads successfully, `reloader_reload_executed_total{success="true"}` gets incremented. You will be able to see the following metrics, with some other metrics, at `/metrics` endpoint.
|
||||
|
||||
```
|
||||
reloader_reload_executed_total{success="false"} 15
|
||||
reloader_reload_executed_total{success="true"} 12
|
||||
```
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
These are the key features of Reloader:
|
||||
|
||||
1. Restart pod in a depoloyment on change in linked/related configmap's or secret's
|
||||
1. Restart pods in a deployment on a change in linked/related configmaps or secrets
2. Restart pods in a daemonset on a change in linked/related configmaps or secrets
3. Restart pods in a statefulset on a change in linked/related configmaps or secrets
4. Restart pods in a rollout on a change in linked/related configmaps or secrets
|
||||
|
||||
glide.lock (generated, 269 lines)
@@ -1,269 +0,0 @@
|
||||
hash: b6fe060028bdb1249ba2413746476c2550b267eeab3c166c36a86e000a8dd354
|
||||
updated: 2018-07-24T21:12:43.027181463+05:00
|
||||
imports:
|
||||
- name: github.com/davecgh/go-spew
|
||||
version: 782f4967f2dc4564575ca782fe2d04090b5faca8
|
||||
subpackages:
|
||||
- spew
|
||||
- name: github.com/emicklei/go-restful
|
||||
version: ff4f55a206334ef123e4f79bbf348980da81ca46
|
||||
subpackages:
|
||||
- log
|
||||
- name: github.com/emicklei/go-restful-swagger12
|
||||
version: dcef7f55730566d41eae5db10e7d6981829720f6
|
||||
- name: github.com/ghodss/yaml
|
||||
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
|
||||
- name: github.com/go-openapi/jsonpointer
|
||||
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
|
||||
- name: github.com/go-openapi/jsonreference
|
||||
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
|
||||
- name: github.com/go-openapi/spec
|
||||
version: 6aced65f8501fe1217321abf0749d354824ba2ff
|
||||
- name: github.com/go-openapi/swag
|
||||
version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72
|
||||
- name: github.com/gogo/protobuf
|
||||
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
|
||||
subpackages:
|
||||
- proto
|
||||
- sortkeys
|
||||
- name: github.com/golang/glog
|
||||
version: 44145f04b68cf362d9c4df2182967c2275eaefed
|
||||
- name: github.com/golang/protobuf
|
||||
version: 4bd1920723d7b7c925de087aa32e2187708897f7
|
||||
subpackages:
|
||||
- proto
|
||||
- ptypes
|
||||
- ptypes/any
|
||||
- ptypes/duration
|
||||
- ptypes/timestamp
|
||||
- name: github.com/google/btree
|
||||
version: 7d79101e329e5a3adf994758c578dab82b90c017
|
||||
- name: github.com/google/gofuzz
|
||||
version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
|
||||
- name: github.com/googleapis/gnostic
|
||||
version: 0c5108395e2debce0d731cf0287ddf7242066aba
|
||||
subpackages:
|
||||
- OpenAPIv2
|
||||
- compiler
|
||||
- extensions
|
||||
- name: github.com/gregjones/httpcache
|
||||
version: 787624de3eb7bd915c329cba748687a3b22666a6
|
||||
subpackages:
|
||||
- diskcache
|
||||
- name: github.com/hashicorp/golang-lru
|
||||
version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
|
||||
subpackages:
|
||||
- simplelru
|
||||
- name: github.com/howeyc/gopass
|
||||
version: bf9dde6d0d2c004a008c27aaee91170c786f6db8
|
||||
- name: github.com/imdario/mergo
|
||||
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
|
||||
- name: github.com/inconshreveable/mousetrap
|
||||
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
||||
- name: github.com/json-iterator/go
|
||||
version: 36b14963da70d11297d313183d7e6388c8510e1e
|
||||
- name: github.com/juju/ratelimit
|
||||
version: 5b9ff866471762aa2ab2dced63c9fb6f53921342
|
||||
- name: github.com/mailru/easyjson
|
||||
version: d5b7844b561a7bc640052f1b935f7b800330d7e0
|
||||
subpackages:
|
||||
- buffer
|
||||
- jlexer
|
||||
- jwriter
|
||||
- name: github.com/peterbourgon/diskv
|
||||
version: 5f041e8faa004a95c88a202771f4cc3e991971e6
|
||||
- name: github.com/PuerkitoBio/purell
|
||||
version: 8a290539e2e8629dbc4e6bad948158f790ec31f4
|
||||
- name: github.com/PuerkitoBio/urlesc
|
||||
version: 5bd2802263f21d8788851d5305584c82a5c75d7e
|
||||
- name: github.com/sirupsen/logrus
|
||||
version: c155da19408a8799da419ed3eeb0cb5db0ad5dbc
|
||||
- name: github.com/spf13/cobra
|
||||
version: ef82de70bb3f60c65fb8eebacbb2d122ef517385
|
||||
- name: github.com/spf13/pflag
|
||||
version: 583c0c0531f06d5278b7d917446061adc344b5cd
|
||||
- name: golang.org/x/crypto
|
||||
version: 81e90905daefcd6fd217b62423c0908922eadb30
|
||||
subpackages:
|
||||
- ssh/terminal
|
||||
- name: golang.org/x/net
|
||||
version: 1c05540f6879653db88113bc4a2b70aec4bd491f
|
||||
subpackages:
|
||||
- context
|
||||
- http2
|
||||
- http2/hpack
|
||||
- idna
|
||||
- lex/httplex
|
||||
- name: golang.org/x/sys
|
||||
version: 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce
|
||||
subpackages:
|
||||
- unix
|
||||
- windows
|
||||
- name: golang.org/x/text
|
||||
version: b19bf474d317b857955b12035d2c5acb57ce8b01
|
||||
subpackages:
|
||||
- cases
|
||||
- internal
|
||||
- internal/tag
|
||||
- language
|
||||
- runes
|
||||
- secure/bidirule
|
||||
- secure/precis
|
||||
- transform
|
||||
- unicode/bidi
|
||||
- unicode/norm
|
||||
- width
|
||||
- name: gopkg.in/inf.v0
|
||||
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
|
||||
- name: gopkg.in/yaml.v2
|
||||
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
|
||||
- name: k8s.io/api
|
||||
version: fe29995db37613b9c5b2a647544cf627bfa8d299
|
||||
subpackages:
|
||||
- admissionregistration/v1alpha1
|
||||
- apps/v1beta1
|
||||
- apps/v1beta2
|
||||
- authentication/v1
|
||||
- authentication/v1beta1
|
||||
- authorization/v1
|
||||
- authorization/v1beta1
|
||||
- autoscaling/v1
|
||||
- autoscaling/v2beta1
|
||||
- batch/v1
|
||||
- batch/v1beta1
|
||||
- batch/v2alpha1
|
||||
- certificates/v1beta1
|
||||
- core/v1
|
||||
- extensions/v1beta1
|
||||
- networking/v1
|
||||
- policy/v1beta1
|
||||
- rbac/v1
|
||||
- rbac/v1alpha1
|
||||
- rbac/v1beta1
|
||||
- scheduling/v1alpha1
|
||||
- settings/v1alpha1
|
||||
- storage/v1
|
||||
- storage/v1beta1
|
||||
- name: k8s.io/apimachinery
|
||||
version: 019ae5ada31de202164b118aee88ee2d14075c31
|
||||
subpackages:
|
||||
- pkg/api/equality
|
||||
- pkg/api/errors
|
||||
- pkg/api/meta
|
||||
- pkg/api/resource
|
||||
- pkg/apis/meta/internalversion
|
||||
- pkg/apis/meta/v1
|
||||
- pkg/apis/meta/v1/unstructured
|
||||
- pkg/apis/meta/v1alpha1
|
||||
- pkg/conversion
|
||||
- pkg/conversion/queryparams
|
||||
- pkg/conversion/unstructured
|
||||
- pkg/fields
|
||||
- pkg/labels
|
||||
- pkg/runtime
|
||||
- pkg/runtime/schema
|
||||
- pkg/runtime/serializer
|
||||
- pkg/runtime/serializer/json
|
||||
- pkg/runtime/serializer/protobuf
|
||||
- pkg/runtime/serializer/recognizer
|
||||
- pkg/runtime/serializer/streaming
|
||||
- pkg/runtime/serializer/versioning
|
||||
- pkg/selection
|
||||
- pkg/types
|
||||
- pkg/util/cache
|
||||
- pkg/util/clock
|
||||
- pkg/util/diff
|
||||
- pkg/util/errors
|
||||
- pkg/util/framer
|
||||
- pkg/util/intstr
|
||||
- pkg/util/json
|
||||
- pkg/util/net
|
||||
- pkg/util/runtime
|
||||
- pkg/util/sets
|
||||
- pkg/util/validation
|
||||
- pkg/util/validation/field
|
||||
- pkg/util/wait
|
||||
- pkg/util/yaml
|
||||
- pkg/version
|
||||
- pkg/watch
|
||||
- third_party/forked/golang/reflect
|
||||
- name: k8s.io/client-go
|
||||
version: 35874c597fed17ca62cd197e516d7d5ff9a2958c
|
||||
subpackages:
|
||||
- discovery
|
||||
- discovery/fake
|
||||
- kubernetes
|
||||
- kubernetes/fake
|
||||
- kubernetes/scheme
|
||||
- kubernetes/typed/admissionregistration/v1alpha1
|
||||
- kubernetes/typed/admissionregistration/v1alpha1/fake
|
||||
- kubernetes/typed/apps/v1beta1
|
||||
- kubernetes/typed/apps/v1beta1/fake
|
||||
- kubernetes/typed/apps/v1beta2
|
||||
- kubernetes/typed/apps/v1beta2/fake
|
||||
- kubernetes/typed/authentication/v1
|
||||
- kubernetes/typed/authentication/v1/fake
|
||||
- kubernetes/typed/authentication/v1beta1
|
||||
- kubernetes/typed/authentication/v1beta1/fake
|
||||
- kubernetes/typed/authorization/v1
|
||||
- kubernetes/typed/authorization/v1/fake
|
||||
- kubernetes/typed/authorization/v1beta1
|
||||
- kubernetes/typed/authorization/v1beta1/fake
|
||||
- kubernetes/typed/autoscaling/v1
|
||||
- kubernetes/typed/autoscaling/v1/fake
|
||||
- kubernetes/typed/autoscaling/v2beta1
|
||||
- kubernetes/typed/autoscaling/v2beta1/fake
|
||||
- kubernetes/typed/batch/v1
|
||||
- kubernetes/typed/batch/v1/fake
|
||||
- kubernetes/typed/batch/v1beta1
|
||||
- kubernetes/typed/batch/v1beta1/fake
|
||||
- kubernetes/typed/batch/v2alpha1
|
||||
- kubernetes/typed/batch/v2alpha1/fake
|
||||
- kubernetes/typed/certificates/v1beta1
|
||||
- kubernetes/typed/certificates/v1beta1/fake
|
||||
- kubernetes/typed/core/v1
|
||||
- kubernetes/typed/core/v1/fake
|
||||
- kubernetes/typed/extensions/v1beta1
|
||||
- kubernetes/typed/extensions/v1beta1/fake
|
||||
- kubernetes/typed/networking/v1
|
||||
- kubernetes/typed/networking/v1/fake
|
||||
- kubernetes/typed/policy/v1beta1
|
||||
- kubernetes/typed/policy/v1beta1/fake
|
||||
- kubernetes/typed/rbac/v1
|
||||
- kubernetes/typed/rbac/v1/fake
|
||||
- kubernetes/typed/rbac/v1alpha1
|
||||
- kubernetes/typed/rbac/v1alpha1/fake
|
||||
- kubernetes/typed/rbac/v1beta1
|
||||
- kubernetes/typed/rbac/v1beta1/fake
|
||||
- kubernetes/typed/scheduling/v1alpha1
|
||||
- kubernetes/typed/scheduling/v1alpha1/fake
|
||||
- kubernetes/typed/settings/v1alpha1
|
||||
- kubernetes/typed/settings/v1alpha1/fake
|
||||
- kubernetes/typed/storage/v1
|
||||
- kubernetes/typed/storage/v1/fake
|
||||
- kubernetes/typed/storage/v1beta1
|
||||
- kubernetes/typed/storage/v1beta1/fake
|
||||
- pkg/version
|
||||
- rest
|
||||
- rest/watch
|
||||
- testing
|
||||
- tools/auth
|
||||
- tools/cache
|
||||
- tools/clientcmd
|
||||
- tools/clientcmd/api
|
||||
- tools/clientcmd/api/latest
|
||||
- tools/clientcmd/api/v1
|
||||
- tools/metrics
|
||||
- tools/pager
|
||||
- tools/reference
|
||||
- transport
|
||||
- util/cert
|
||||
- util/flowcontrol
|
||||
- util/homedir
|
||||
- util/integer
|
||||
- util/workqueue
|
||||
- name: k8s.io/kube-openapi
|
||||
version: 868f2f29720b192240e18284659231b440f9cda5
|
||||
subpackages:
|
||||
- pkg/common
|
||||
testImports: []
|
||||
glide.yaml (14 lines deleted)
@@ -1,14 +0,0 @@
package: github.com/stakater/Reloader
import:
- package: k8s.io/api
  version: kubernetes-1.8.0
- package: k8s.io/apimachinery
  version: kubernetes-1.8.0
- package: k8s.io/client-go
  version: 5.0.0
- package: github.com/spf13/cobra
  version: 0.0.3
- package: github.com/spf13/pflag
  version: 1.0.1
- package: github.com/sirupsen/logrus
  version: 1.0.5
go.mod (new file, 99 lines)
@@ -0,0 +1,99 @@
module github.com/stakater/Reloader
|
||||
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/argoproj/argo-rollouts v1.2.1
|
||||
github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
|
||||
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
|
||||
github.com/parnurzeal/gorequest v0.2.16
|
||||
github.com/prometheus/client_golang v1.12.2
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/spf13/cobra v1.5.0
|
||||
k8s.io/api v0.24.2
|
||||
k8s.io/apimachinery v0.24.2
|
||||
k8s.io/client-go v0.24.2
|
||||
k8s.io/kubectl v0.23.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/swag v0.21.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/gnostic v0.6.9 // indirect
|
||||
github.com/google/go-cmp v0.5.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.36.0 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
golang.org/x/net v0.0.0-20220708220712-1185a9018129 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 // indirect
|
||||
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e // indirect
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/klog/v2 v2.60.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 // indirect
|
||||
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c // indirect
|
||||
moul.io/http2curl v1.0.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
// Replacements for argo-rollouts
|
||||
replace (
|
||||
github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127
|
||||
github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.16.0
|
||||
k8s.io/api v0.0.0 => k8s.io/api v0.24.2
|
||||
k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.24.2
|
||||
k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.21.0-alpha.0
|
||||
k8s.io/apiserver v0.0.0 => k8s.io/apiserver v0.24.2
|
||||
k8s.io/cli-runtime v0.0.0 => k8s.io/cli-runtime v0.24.2
|
||||
k8s.io/client-go v0.0.0 => k8s.io/client-go v0.24.2
|
||||
k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.24.2
|
||||
k8s.io/cluster-bootstrap v0.0.0 => k8s.io/cluster-bootstrap v0.24.2
|
||||
k8s.io/code-generator v0.0.0 => k8s.io/code-generator v0.20.5-rc.0
|
||||
k8s.io/component-base v0.0.0 => k8s.io/component-base v0.24.2
|
||||
k8s.io/component-helpers v0.0.0 => k8s.io/component-helpers v0.24.2
|
||||
k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.24.2
|
||||
k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.20.5-rc.0
|
||||
k8s.io/csi-translation-lib v0.0.0 => k8s.io/csi-translation-lib v0.24.2
|
||||
k8s.io/kube-aggregator v0.0.0 => k8s.io/kube-aggregator v0.24.2
|
||||
k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.24.2
|
||||
k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.24.2
|
||||
k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.24.2
|
||||
k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.24.2
|
||||
k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.2
|
||||
k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.24.2
|
||||
k8s.io/metrics v0.0.0 => k8s.io/metrics v0.24.2
|
||||
k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.20.5-rc.0
|
||||
k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.24.2
|
||||
k8s.io/sample-cli-plugin v0.0.0 => k8s.io/sample-cli-plugin v0.24.2
|
||||
k8s.io/sample-controller v0.0.0 => k8s.io/sample-controller v0.24.2
|
||||
)
|
||||
internal/pkg/alerts/alert.go (new file, 94 lines)
@@ -0,0 +1,94 @@
package alert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// function to send alert msg to webhook service
|
||||
func SendWebhookAlert(msg string) {
|
||||
webhook_url, ok := os.LookupEnv("ALERT_WEBHOOK_URL")
|
||||
if !ok {
|
||||
logrus.Error("ALERT_WEBHOOK_URL env variable not provided")
|
||||
return
|
||||
}
|
||||
webhook_url = strings.TrimSpace(webhook_url)
|
||||
alert_sink := os.Getenv("ALERT_SINK")
|
||||
alert_sink = strings.ToLower(strings.TrimSpace(alert_sink))
|
||||
|
||||
// Provision to add Proxy to reach webhook server if required
|
||||
webhook_proxy := os.Getenv("ALERT_WEBHOOK_PROXY")
|
||||
webhook_proxy = strings.TrimSpace(webhook_proxy)
|
||||
|
||||
// Provision to add Additional information in the alert. e.g ClusterName
|
||||
alert_additional_info, ok := os.LookupEnv("ALERT_ADDITIONAL_INFO")
|
||||
if ok {
|
||||
alert_additional_info = strings.TrimSpace(alert_additional_info)
|
||||
msg = fmt.Sprintf("%s : %s", alert_additional_info, msg)
|
||||
}
|
||||
|
||||
if alert_sink == "slack" {
|
||||
sendSlackAlert(webhook_url, webhook_proxy, msg)
|
||||
} else {
|
||||
msg = strings.Replace(msg, "*", "", -1)
|
||||
sendRawWebhookAlert(webhook_url, webhook_proxy, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// function to handle server redirection
|
||||
func redirectPolicy(req gorequest.Request, via []gorequest.Request) error {
|
||||
return fmt.Errorf("incorrect token (redirection)")
|
||||
}
|
||||
|
||||
// function to send alert to slack
|
||||
func sendSlackAlert(webhookUrl string, proxy string, msg string) []error {
|
||||
attachment := Attachment{
|
||||
Text: msg,
|
||||
Color: "good",
|
||||
AuthorName: "Reloader",
|
||||
}
|
||||
|
||||
payload := WebhookMessage{
|
||||
Attachments: []Attachment{attachment},
|
||||
}
|
||||
|
||||
request := gorequest.New().Proxy(proxy)
|
||||
resp, _, err := request.
|
||||
Post(webhookUrl).
|
||||
RedirectPolicy(redirectPolicy).
|
||||
Send(payload).
|
||||
End()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode >= 400 {
|
||||
return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// function to send alert to webhook service as text
|
||||
func sendRawWebhookAlert(webhookUrl string, proxy string, msg string) []error {
|
||||
request := gorequest.New().Proxy(proxy)
|
||||
resp, _, err := request.
|
||||
Post(webhookUrl).
|
||||
Type("text").
|
||||
RedirectPolicy(redirectPolicy).
|
||||
Send(msg).
|
||||
End()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode >= 400 {
|
||||
return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
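As a rough illustration of how this alerting hook is driven, here is an assumed usage sketch (not code from this diff): in a real deployment the environment variables would be set on the Reloader container rather than in Go, and the `internal` package is only importable from within this module.

```go
package main

import (
	"os"

	alert "github.com/stakater/Reloader/internal/pkg/alerts"
)

func main() {
	// Illustrative values; SendWebhookAlert reads these at call time.
	os.Setenv("ALERT_WEBHOOK_URL", "https://hooks.slack.com/services/T000/B000/XXXXXXXX")
	os.Setenv("ALERT_SINK", "slack")                   // anything other than "slack" falls back to a raw text POST
	os.Setenv("ALERT_ADDITIONAL_INFO", "prod-cluster") // prefixed to every message

	alert.SendWebhookAlert("Changes detected in *my-configmap*; rolling upgrade triggered for *my-app*")
}
```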
internal/pkg/alerts/slack_alert.go (new file, 61 lines)
@@ -0,0 +1,61 @@
package alert
|
||||
|
||||
type WebhookMessage struct {
|
||||
Username string `json:"username,omitempty"`
|
||||
IconEmoji string `json:"icon_emoji,omitempty"`
|
||||
IconURL string `json:"icon_url,omitempty"`
|
||||
Channel string `json:"channel,omitempty"`
|
||||
ThreadTimestamp string `json:"thread_ts,omitempty"`
|
||||
Text string `json:"text,omitempty"`
|
||||
Attachments []Attachment `json:"attachments,omitempty"`
|
||||
Parse string `json:"parse,omitempty"`
|
||||
ResponseType string `json:"response_type,omitempty"`
|
||||
ReplaceOriginal bool `json:"replace_original,omitempty"`
|
||||
DeleteOriginal bool `json:"delete_original,omitempty"`
|
||||
ReplyBroadcast bool `json:"reply_broadcast,omitempty"`
|
||||
}
|
||||
|
||||
type Attachment struct {
|
||||
Color string `json:"color,omitempty"`
|
||||
Fallback string `json:"fallback,omitempty"`
|
||||
|
||||
CallbackID string `json:"callback_id,omitempty"`
|
||||
ID int `json:"id,omitempty"`
|
||||
|
||||
AuthorID string `json:"author_id,omitempty"`
|
||||
AuthorName string `json:"author_name,omitempty"`
|
||||
AuthorSubname string `json:"author_subname,omitempty"`
|
||||
AuthorLink string `json:"author_link,omitempty"`
|
||||
AuthorIcon string `json:"author_icon,omitempty"`
|
||||
|
||||
Title string `json:"title,omitempty"`
|
||||
TitleLink string `json:"title_link,omitempty"`
|
||||
Pretext string `json:"pretext,omitempty"`
|
||||
Text string `json:"text,omitempty"`
|
||||
|
||||
ImageURL string `json:"image_url,omitempty"`
|
||||
ThumbURL string `json:"thumb_url,omitempty"`
|
||||
|
||||
ServiceName string `json:"service_name,omitempty"`
|
||||
ServiceIcon string `json:"service_icon,omitempty"`
|
||||
FromURL string `json:"from_url,omitempty"`
|
||||
OriginalURL string `json:"original_url,omitempty"`
|
||||
|
||||
MarkdownIn []string `json:"mrkdwn_in,omitempty"`
|
||||
|
||||
Footer string `json:"footer,omitempty"`
|
||||
FooterIcon string `json:"footer_icon,omitempty"`
|
||||
}
|
||||
|
||||
type Field struct {
|
||||
Title string `json:"title"`
|
||||
Value string `json:"value"`
|
||||
Short bool `json:"short"`
|
||||
}
|
||||
|
||||
type Action struct {
|
||||
Type string `json:"type"`
|
||||
Text string `json:"text"`
|
||||
Url string `json:"url"`
|
||||
Style string `json:"style"`
|
||||
}
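To make the shape of these payload types concrete, here is a small assumed usage sketch (not part of this diff) showing how an `Attachment` serializes; fields left empty are dropped by the `omitempty` tags, and the module-internal import is shown only for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"

	alert "github.com/stakater/Reloader/internal/pkg/alerts"
)

func main() {
	msg := alert.WebhookMessage{
		Attachments: []alert.Attachment{{
			AuthorName: "Reloader",
			Color:      "good",
			Text:       "Changes detected in foo-configmap in namespace: default",
		}},
	}

	// Only the populated fields survive marshalling, e.g.:
	// {"attachments":[{"color":"good","author_name":"Reloader","text":"Changes detected in foo-configmap in namespace: default"}]}
	out, _ := json.Marshal(msg)
	fmt.Println(string(out))
}
```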
@@ -1,91 +1,307 @@
|
||||
package callbacks
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
apps_v1beta1 "k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/api/extensions/v1beta1"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
|
||||
openshiftv1 "github.com/openshift/api/apps/v1"
|
||||
)
|
||||
|
||||
//ItemsFunc is a generic function to return a specific resource array in given namespace
|
||||
type ItemsFunc func(kubernetes.Interface, string) []interface{}
|
||||
// ItemsFunc is a generic function to return a specific resource array in given namespace
|
||||
type ItemsFunc func(kube.Clients, string) []runtime.Object
|
||||
|
||||
//ContainersFunc is a generic func to return containers
|
||||
type ContainersFunc func(interface{}) []v1.Container
|
||||
// ContainersFunc is a generic func to return containers
|
||||
type ContainersFunc func(runtime.Object) []v1.Container
|
||||
|
||||
//UpdateFunc performs the resource update
|
||||
type UpdateFunc func(kubernetes.Interface, string, interface{}) error
|
||||
// InitContainersFunc is a generic func to return containers
|
||||
type InitContainersFunc func(runtime.Object) []v1.Container
|
||||
|
||||
//RollingUpgradeFuncs contains generic functions to perform rolling upgrade
|
||||
// VolumesFunc is a generic func to return volumes
|
||||
type VolumesFunc func(runtime.Object) []v1.Volume
|
||||
|
||||
// UpdateFunc performs the resource update
|
||||
type UpdateFunc func(kube.Clients, string, runtime.Object) error
|
||||
|
||||
// AnnotationsFunc is a generic func to return annotations
|
||||
type AnnotationsFunc func(runtime.Object) map[string]string
|
||||
|
||||
// PodAnnotationsFunc is a generic func to return annotations
|
||||
type PodAnnotationsFunc func(runtime.Object) map[string]string
|
||||
|
||||
// RollingUpgradeFuncs contains generic functions to perform rolling upgrade
|
||||
type RollingUpgradeFuncs struct {
|
||||
ItemsFunc ItemsFunc
|
||||
ContainersFunc ContainersFunc
|
||||
UpdateFunc UpdateFunc
|
||||
ResourceType string
|
||||
ItemsFunc ItemsFunc
|
||||
AnnotationsFunc AnnotationsFunc
|
||||
PodAnnotationsFunc PodAnnotationsFunc
|
||||
ContainersFunc ContainersFunc
|
||||
InitContainersFunc InitContainersFunc
|
||||
UpdateFunc UpdateFunc
|
||||
VolumesFunc VolumesFunc
|
||||
ResourceType string
|
||||
}
|
||||
|
||||
// GetDeploymentItems returns the deployments in given namespace
|
||||
func GetDeploymentItems(client kubernetes.Interface, namespace string) []interface{} {
|
||||
deployments, err := client.ExtensionsV1beta1().Deployments(namespace).List(meta_v1.ListOptions{})
|
||||
func GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object {
|
||||
deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{})
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to list deployments %v", err)
|
||||
}
|
||||
return util.InterfaceSlice(deployments.Items)
|
||||
|
||||
items := make([]runtime.Object, len(deployments.Items))
|
||||
// Ensure we always have pod annotations to add to
|
||||
for i, v := range deployments.Items {
|
||||
if v.Spec.Template.ObjectMeta.Annotations == nil {
|
||||
annotations := make(map[string]string)
|
||||
deployments.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
|
||||
}
|
||||
items[i] = &deployments.Items[i]
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
// GetDaemonSetItems returns the daemonSet in given namespace
|
||||
func GetDaemonSetItems(client kubernetes.Interface, namespace string) []interface{} {
|
||||
daemonSets, err := client.ExtensionsV1beta1().DaemonSets(namespace).List(meta_v1.ListOptions{})
|
||||
// GetDaemonSetItems returns the daemonSets in given namespace
|
||||
func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
|
||||
daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to list daemonSets %v", err)
|
||||
}
|
||||
return util.InterfaceSlice(daemonSets.Items)
|
||||
|
||||
items := make([]runtime.Object, len(daemonSets.Items))
|
||||
// Ensure we always have pod annotations to add to
|
||||
for i, v := range daemonSets.Items {
|
||||
if v.Spec.Template.ObjectMeta.Annotations == nil {
|
||||
daemonSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
|
||||
}
|
||||
items[i] = &daemonSets.Items[i]
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
// GetStatefulSetItems returns the statefulSet in given namespace
|
||||
func GetStatefulSetItems(client kubernetes.Interface, namespace string) []interface{} {
|
||||
statefulSets, err := client.AppsV1beta1().StatefulSets(namespace).List(meta_v1.ListOptions{})
|
||||
// GetStatefulSetItems returns the statefulSets in given namespace
|
||||
func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Object {
|
||||
statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to list statefulSets %v", err)
|
||||
}
|
||||
return util.InterfaceSlice(statefulSets.Items)
|
||||
|
||||
items := make([]runtime.Object, len(statefulSets.Items))
|
||||
// Ensure we always have pod annotations to add to
|
||||
for i, v := range statefulSets.Items {
|
||||
if v.Spec.Template.ObjectMeta.Annotations == nil {
|
||||
statefulSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
|
||||
}
|
||||
items[i] = &statefulSets.Items[i]
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
|
||||
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []runtime.Object {
|
||||
deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to list deploymentConfigs %v", err)
|
||||
}
|
||||
|
||||
items := make([]runtime.Object, len(deploymentConfigs.Items))
|
||||
// Ensure we always have pod annotations to add to
|
||||
for i, v := range deploymentConfigs.Items {
|
||||
if v.Spec.Template.ObjectMeta.Annotations == nil {
|
||||
deploymentConfigs.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
|
||||
}
|
||||
items[i] = &deploymentConfigs.Items[i]
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
// GetRolloutItems returns the rollouts in given namespace
|
||||
func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
|
||||
rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{})
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to list Rollouts %v", err)
|
||||
}
|
||||
|
||||
items := make([]runtime.Object, len(rollouts.Items))
|
||||
// Ensure we always have pod annotations to add to
|
||||
for i, v := range rollouts.Items {
|
||||
if v.Spec.Template.ObjectMeta.Annotations == nil {
|
||||
rollouts.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
|
||||
}
|
||||
items[i] = &rollouts.Items[i]
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
// GetDeploymentAnnotations returns the annotations of given deployment
|
||||
func GetDeploymentAnnotations(item runtime.Object) map[string]string {
|
||||
return item.(*appsv1.Deployment).ObjectMeta.Annotations
|
||||
}
|
||||
|
||||
// GetDaemonSetAnnotations returns the annotations of given daemonSet
|
||||
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
|
||||
return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
|
||||
}
|
||||
|
||||
// GetStatefulSetAnnotations returns the annotations of given statefulSet
|
||||
func GetStatefulSetAnnotations(item runtime.Object) map[string]string {
|
||||
return item.(*appsv1.StatefulSet).ObjectMeta.Annotations
|
||||
}
|
||||
|
||||
// GetDeploymentConfigAnnotations returns the annotations of given deploymentConfig
|
||||
func GetDeploymentConfigAnnotations(item runtime.Object) map[string]string {
|
||||
return item.(*openshiftv1.DeploymentConfig).ObjectMeta.Annotations
|
||||
}
|
||||
|
||||
// GetRolloutAnnotations returns the annotations of given rollout
|
||||
func GetRolloutAnnotations(item runtime.Object) map[string]string {
|
||||
return item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
|
||||
}
|
||||
|
||||
// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
|
||||
func GetDeploymentPodAnnotations(item runtime.Object) map[string]string {
|
||||
return item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
|
||||
}
|
||||
|
||||
// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
|
||||
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
|
||||
return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
|
||||
}
|
||||
|
||||
// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
|
||||
func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string {
|
||||
return item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
|
||||
}
|
||||
|
||||
// GetDeploymentConfigPodAnnotations returns the pod's annotations of given deploymentConfig
|
||||
func GetDeploymentConfigPodAnnotations(item runtime.Object) map[string]string {
|
||||
return item.(*openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
|
||||
}
|
||||
|
||||
// GetRolloutPodAnnotations returns the pod's annotations of given rollout
|
||||
func GetRolloutPodAnnotations(item runtime.Object) map[string]string {
|
||||
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
|
||||
}
|
||||
|
||||
// GetDeploymentContainers returns the containers of given deployment
|
||||
func GetDeploymentContainers(item interface{}) []v1.Container {
|
||||
return item.(v1beta1.Deployment).Spec.Template.Spec.Containers
|
||||
func GetDeploymentContainers(item runtime.Object) []v1.Container {
|
||||
return item.(*appsv1.Deployment).Spec.Template.Spec.Containers
|
||||
}
|
||||
|
||||
// GetDaemonSetContainers returns the containers of given daemonset
|
||||
func GetDaemonSetContainers(item interface{}) []v1.Container {
|
||||
return item.(v1beta1.DaemonSet).Spec.Template.Spec.Containers
|
||||
// GetDaemonSetContainers returns the containers of given daemonSet
|
||||
func GetDaemonSetContainers(item runtime.Object) []v1.Container {
|
||||
return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers
|
||||
}
|
||||
|
||||
// GetStatefulsetContainers returns the containers of given statefulSet
|
||||
func GetStatefulsetContainers(item interface{}) []v1.Container {
|
||||
return item.(apps_v1beta1.StatefulSet).Spec.Template.Spec.Containers
|
||||
// GetStatefulSetContainers returns the containers of given statefulSet
|
||||
func GetStatefulSetContainers(item runtime.Object) []v1.Container {
|
||||
return item.(*appsv1.StatefulSet).Spec.Template.Spec.Containers
|
||||
}
|
||||
|
||||
// GetDeploymentConfigContainers returns the containers of given deploymentConfig
|
||||
func GetDeploymentConfigContainers(item runtime.Object) []v1.Container {
|
||||
return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
|
||||
}
|
||||
|
||||
// GetRolloutContainers returns the containers of given rollout
|
||||
func GetRolloutContainers(item runtime.Object) []v1.Container {
|
||||
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
|
||||
}
|
||||
|
||||
// GetDeploymentInitContainers returns the init containers of given deployment
|
||||
func GetDeploymentInitContainers(item runtime.Object) []v1.Container {
|
||||
return item.(*appsv1.Deployment).Spec.Template.Spec.InitContainers
|
||||
}
|
||||
|
||||
// GetDaemonSetInitContainers returns the init containers of given daemonSet
|
||||
func GetDaemonSetInitContainers(item runtime.Object) []v1.Container {
|
||||
return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers
|
||||
}
|
||||
|
||||
// GetStatefulSetInitContainers returns the init containers of given statefulSet
|
||||
func GetStatefulSetInitContainers(item runtime.Object) []v1.Container {
|
||||
return item.(*appsv1.StatefulSet).Spec.Template.Spec.InitContainers
|
||||
}
|
||||
|
||||
// GetDeploymentConfigInitContainers returns the init containers of given deploymentConfig
|
||||
func GetDeploymentConfigInitContainers(item runtime.Object) []v1.Container {
|
||||
return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
|
||||
}
|
||||
|
||||
// GetRolloutInitContainers returns the init containers of given rollout
|
||||
func GetRolloutInitContainers(item runtime.Object) []v1.Container {
|
||||
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
|
||||
}
|
||||
|
||||
// UpdateDeployment performs rolling upgrade on deployment
|
||||
func UpdateDeployment(client kubernetes.Interface, namespace string, resource interface{}) error {
|
||||
deployment := resource.(v1beta1.Deployment)
|
||||
_, err := client.ExtensionsV1beta1().Deployments(namespace).Update(&deployment)
|
||||
func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.Object) error {
|
||||
deployment := resource.(*appsv1.Deployment)
|
||||
_, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateDaemonSet performs rolling upgrade on daemonSet
|
||||
func UpdateDaemonSet(client kubernetes.Interface, namespace string, resource interface{}) error {
|
||||
daemonSet := resource.(v1beta1.DaemonSet)
|
||||
_, err := client.ExtensionsV1beta1().DaemonSets(namespace).Update(&daemonSet)
|
||||
func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
|
||||
daemonSet := resource.(*appsv1.DaemonSet)
|
||||
_, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateStatefulset performs rolling upgrade on statefulSet
|
||||
func UpdateStatefulset(client kubernetes.Interface, namespace string, resource interface{}) error {
|
||||
statefulSet := resource.(apps_v1beta1.StatefulSet)
|
||||
_, err := client.AppsV1beta1().StatefulSets(namespace).Update(&statefulSet)
|
||||
// UpdateStatefulSet performs rolling upgrade on statefulSet
|
||||
func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.Object) error {
|
||||
statefulSet := resource.(*appsv1.StatefulSet)
|
||||
_, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
|
||||
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource runtime.Object) error {
|
||||
deploymentConfig := resource.(*openshiftv1.DeploymentConfig)
|
||||
_, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateRollout performs rolling upgrade on rollout
|
||||
func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error {
|
||||
rollout := resource.(*argorolloutv1alpha1.Rollout)
|
||||
rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
|
||||
logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
|
||||
logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
|
||||
_, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
|
||||
return err
|
||||
}
|
||||
|
||||
// GetDeploymentVolumes returns the Volumes of given deployment
|
||||
func GetDeploymentVolumes(item runtime.Object) []v1.Volume {
|
||||
return item.(*appsv1.Deployment).Spec.Template.Spec.Volumes
|
||||
}
|
||||
|
||||
// GetDaemonSetVolumes returns the Volumes of given daemonSet
|
||||
func GetDaemonSetVolumes(item runtime.Object) []v1.Volume {
|
||||
return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes
|
||||
}
|
||||
|
||||
// GetStatefulSetVolumes returns the Volumes of given statefulSet
|
||||
func GetStatefulSetVolumes(item runtime.Object) []v1.Volume {
|
||||
return item.(*appsv1.StatefulSet).Spec.Template.Spec.Volumes
|
||||
}
|
||||
|
||||
// GetDeploymentConfigVolumes returns the Volumes of given deploymentConfig
|
||||
func GetDeploymentConfigVolumes(item runtime.Object) []v1.Volume {
|
||||
return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
|
||||
}
|
||||
|
||||
// GetRolloutVolumes returns the Volumes of given rollout
|
||||
func GetRolloutVolumes(item runtime.Object) []v1.Volume {
|
||||
return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
|
||||
}
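Taken together, the callbacks above plug into the expanded `RollingUpgradeFuncs` struct. Here is a minimal sketch (assumed usage, mirroring how the handler package wires this up for each workload type) for Deployments; the variable name is illustrative and the module-internal import is shown only for clarity.

```go
package main

import (
	"github.com/stakater/Reloader/internal/pkg/callbacks"
)

// deploymentFuncs bundles every Deployment-specific callback so the generic
// rolling-upgrade logic never has to know the concrete workload type.
var deploymentFuncs = callbacks.RollingUpgradeFuncs{
	ItemsFunc:          callbacks.GetDeploymentItems,
	AnnotationsFunc:    callbacks.GetDeploymentAnnotations,
	PodAnnotationsFunc: callbacks.GetDeploymentPodAnnotations,
	ContainersFunc:     callbacks.GetDeploymentContainers,
	InitContainersFunc: callbacks.GetDeploymentInitContainers,
	UpdateFunc:         callbacks.UpdateDeployment,
	VolumesFunc:        callbacks.GetDeploymentVolumes,
	ResourceType:       "Deployment",
}

func main() { _ = deploymentFuncs }
```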
@@ -1,26 +1,114 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/leadership"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stakater/Reloader/internal/pkg/controller"
|
||||
"github.com/stakater/Reloader/internal/pkg/metrics"
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// NewReloaderCommand starts the reloader controller
|
||||
func NewReloaderCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "reloader",
|
||||
Short: "A watcher for your Kubernetes cluster",
|
||||
Run: startReloader,
|
||||
Use: "reloader",
|
||||
Short: "A watcher for your Kubernetes cluster",
|
||||
PreRunE: validateFlags,
|
||||
Run: startReloader,
|
||||
}
|
||||
|
||||
// options
|
||||
cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
|
||||
cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
|
||||
cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets")
|
||||
cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
|
||||
cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmapts to match the search")
|
||||
cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON")
|
||||
cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
|
||||
cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
|
||||
cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
|
||||
cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
|
||||
cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
|
||||
cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func validateFlags(*cobra.Command, []string) error {
|
||||
// Ensure the reload strategy is one of the following...
|
||||
var validReloadStrategy bool
|
||||
valid := []string{constants.EnvVarsReloadStrategy, constants.AnnotationsReloadStrategy}
|
||||
for _, s := range valid {
|
||||
if s == options.ReloadStrategy {
|
||||
validReloadStrategy = true
|
||||
}
|
||||
}
|
||||
|
||||
if !validReloadStrategy {
|
||||
err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
|
||||
return errors.New(err)
|
||||
}
|
||||
|
||||
// Validate that HA options are correct
|
||||
if options.EnableHA {
|
||||
if err := validateHAEnvs(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func configureLogging(logFormat string) error {
|
||||
switch logFormat {
|
||||
case "json":
|
||||
logrus.SetFormatter(&logrus.JSONFormatter{})
|
||||
default:
|
||||
// just let the library use default on empty string.
|
||||
if logFormat != "" {
|
||||
return fmt.Errorf("unsupported logging formatter: %q", logFormat)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateHAEnvs() error {
|
||||
podName, podNamespace := getHAEnvs()
|
||||
|
||||
if podName == "" {
|
||||
return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNameEnv, constants.PodNameEnv)
|
||||
}
|
||||
if podNamespace == "" {
|
||||
return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNamespaceEnv, constants.PodNamespaceEnv)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getHAEnvs() (string, string) {
|
||||
podName := os.Getenv(constants.PodNameEnv)
|
||||
podNamespace := os.Getenv(constants.PodNamespaceEnv)
|
||||
|
||||
return podName, podNamespace
|
||||
}
|
||||
|
||||
func startReloader(cmd *cobra.Command, args []string) {
|
||||
err := configureLogging(options.LogFormat)
|
||||
if err != nil {
|
||||
logrus.Warn(err)
|
||||
}
|
||||
|
||||
logrus.Info("Starting Reloader")
|
||||
currentNamespace := os.Getenv("KUBERNETES_NAMESPACE")
|
||||
if len(currentNamespace) == 0 {
|
||||
@@ -29,17 +117,40 @@ func startReloader(cmd *cobra.Command, args []string) {
|
||||
}
|
||||
|
||||
// create the clientset
|
||||
clientset, err := kube.GetClient()
|
||||
clientset, err := kube.GetKubernetesClient()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
ignoredResourcesList, err := getIgnoredResourcesList(cmd)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
ignoredNamespacesList, err := getIgnoredNamespacesList(cmd)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
collectors := metrics.SetupPrometheusEndpoint()
|
||||
|
||||
var controllers []*controller.Controller
|
||||
for k := range kube.ResourceMap {
|
||||
c, err := controller.NewController(clientset, k, currentNamespace)
|
||||
if ignoredResourcesList.Contains(k) {
|
||||
continue
|
||||
}
|
||||
|
||||
c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, collectors)
|
||||
if err != nil {
|
||||
logrus.Fatalf("%s", err)
|
||||
}
|
||||
|
||||
controllers = append(controllers, c)
|
||||
|
||||
// If HA is enabled we only run the controller after winning the leader election, so skip starting it here
|
||||
if options.EnableHA {
|
||||
continue
|
||||
}
|
||||
// Now let's start the controller
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
@@ -47,6 +158,47 @@ func startReloader(cmd *cobra.Command, args []string) {
|
||||
go c.Run(1, stop)
|
||||
}
|
||||
|
||||
// Wait forever
|
||||
select {}
|
||||
// Run leadership election
|
||||
if options.EnableHA {
|
||||
podName, podNamespace := getHAEnvs()
|
||||
lock := leadership.GetNewLock(clientset.CoordinationV1(), constants.LockName, podName, podNamespace)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
|
||||
}
|
||||
|
||||
logrus.Fatal(leadership.Healthz())
|
||||
}
|
||||
|
||||
func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
|
||||
return getStringSliceFromFlags(cmd, "namespaces-to-ignore")
|
||||
}
|
||||
|
||||
func getStringSliceFromFlags(cmd *cobra.Command, flag string) ([]string, error) {
|
||||
slice, err := cmd.Flags().GetStringSlice(flag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return slice, nil
|
||||
}
|
||||
|
||||
func getIgnoredResourcesList(cmd *cobra.Command) (util.List, error) {
|
||||
|
||||
ignoredResourcesList, err := getStringSliceFromFlags(cmd, "resources-to-ignore")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, v := range ignoredResourcesList {
|
||||
if v != "configMaps" && v != "secrets" {
|
||||
return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
|
||||
}
|
||||
}
|
||||
|
||||
if len(ignoredResourcesList) > 1 {
|
||||
return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
|
||||
}
|
||||
|
||||
return ignoredResourcesList, nil
|
||||
}
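A short, assumed sketch of exercising this command in-process (for example from a test harness); the flag values are illustrative, and the `internal/pkg/cmd` import path is an assumption consistent with the other packages in this diff.

```go
package main

import (
	"log"

	"github.com/stakater/Reloader/internal/pkg/cmd"
)

func main() {
	c := cmd.NewReloaderCommand()

	// PreRunE (validateFlags) rejects unknown reload strategies and, when
	// --enable-ha is set, requires POD_NAME and POD_NAMESPACE to be present.
	c.SetArgs([]string{
		"--reload-strategy", "annotations",
		"--resources-to-ignore", "secrets",
		"--log-format", "json",
	})

	// Execute parses the flags, runs the validation above, then starts the
	// controllers; this blocks and needs a reachable cluster.
	if err := c.Execute(); err != nil {
		log.Fatal(err)
	}
}
```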
@@ -1,8 +0,0 @@
package constants

const (
	// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in configmaps
	ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"
	// SecretUpdateOnChangeAnnotation is an annotation to detect changes in secrets
	SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
)
@@ -7,4 +7,23 @@ const (
	SecretEnvVarPostfix = "SECRET"
	// EnvVarPrefix is a Prefix for environment variable
	EnvVarPrefix = "STAKATER_"

	// ReloaderAnnotationPrefix is a Prefix for all reloader annotations
	ReloaderAnnotationPrefix = "reloader.stakater.com"
	// LastReloadedFromAnnotation is an annotation used to describe the last resource that triggered a reload
	LastReloadedFromAnnotation = "last-reloaded-from"

	// ReloadStrategyFlag The reload strategy flag name
	ReloadStrategyFlag = "reload-strategy"
	// EnvVarsReloadStrategy instructs Reloader to add container environment variables to facilitate a restart
	EnvVarsReloadStrategy = "env-vars"
	// AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart
	AnnotationsReloadStrategy = "annotations"
)

// Leadership election related consts
const (
	LockName        string = "stakaer-reloader-lock"
	PodNameEnv      string = "POD_NAME"
	PodNamespaceEnv string = "POD_NAMESPACE"
)
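To make the two reload strategies concrete, here is a small assumed sketch of the pod-template annotation key the annotations strategy would write; the env-var name in the comment and the exact value format are illustrative, since they are not shown in this diff.

```go
package main

import (
	"fmt"

	"github.com/stakater/Reloader/internal/pkg/constants"
)

func main() {
	// env-vars strategy: a STAKATER_* container environment variable is
	// added or updated (e.g. something like STAKATER_FOO_CONFIGMAP).
	// annotations strategy: a pod template annotation is written instead,
	// built from the two constants above.
	key := fmt.Sprintf("%s/%s", constants.ReloaderAnnotationPrefix, constants.LastReloadedFromAnnotation)
	fmt.Println(key) // reloader.stakater.com/last-reloaded-from
}
```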
internal/pkg/constants/enums.go (new file, 15 lines)
@@ -0,0 +1,15 @@
package constants

// Result is a status for deployment update
type Result int

const (
	// Updated is returned when environment variable is created/updated
	Updated Result = 1 + iota
	// NotUpdated is returned when environment variable is found but had value equals to the new value
	NotUpdated
	// NoEnvVarFound is returned when no environment variable is found
	NoEnvVarFound
	// NoContainerFound is returned when no container is found
	NoContainerFound
)
@@ -6,32 +6,51 @@ import (
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/handler"
|
||||
"github.com/stakater/Reloader/internal/pkg/metrics"
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubectl/pkg/scheme"
|
||||
)
|
||||
|
||||
// Controller for checking events
|
||||
type Controller struct {
|
||||
client kubernetes.Interface
|
||||
indexer cache.Indexer
|
||||
queue workqueue.RateLimitingInterface
|
||||
informer cache.Controller
|
||||
namespace string
|
||||
client kubernetes.Interface
|
||||
indexer cache.Indexer
|
||||
queue workqueue.RateLimitingInterface
|
||||
informer cache.Controller
|
||||
namespace string
|
||||
ignoredNamespaces util.List
|
||||
collectors metrics.Collectors
|
||||
recorder record.EventRecorder
|
||||
}
|
||||
|
||||
// controllerInitialized flag indicates whether the controller has been fully initialized
|
||||
var controllerInitialized bool = false
|
||||
|
||||
// NewController for initializing a Controller
|
||||
func NewController(
|
||||
client kubernetes.Interface, resource string, namespace string) (*Controller, error) {
|
||||
client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {
|
||||
|
||||
c := Controller{
|
||||
client: client,
|
||||
namespace: namespace,
|
||||
client: client,
|
||||
namespace: namespace,
|
||||
ignoredNamespaces: ignoredNamespaces,
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
|
||||
Interface: client.CoreV1().Events(""),
|
||||
})
|
||||
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("reloader-%s", resource)})
|
||||
|
||||
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
|
||||
listWatcher := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, fields.Everything())
|
||||
@@ -44,30 +63,54 @@ func NewController(
|
||||
c.indexer = indexer
|
||||
c.informer = informer
|
||||
c.queue = queue
|
||||
c.collectors = collectors
|
||||
c.recorder = recorder
|
||||
|
||||
logrus.Infof("created controller for: %s", resource)
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
// Add function to add a new object to the queue in case of creating a resource
|
||||
func (c *Controller) Add(obj interface{}) {
|
||||
c.queue.Add(handler.ResourceCreatedHandler{
|
||||
Resource: obj,
|
||||
})
|
||||
if options.ReloadOnCreate == "true" {
|
||||
if !c.resourceInIgnoredNamespace(obj) && controllerInitialized {
|
||||
c.queue.Add(handler.ResourceCreatedHandler{
|
||||
Resource: obj,
|
||||
Collectors: c.collectors,
|
||||
Recorder: c.recorder,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
|
||||
switch object := raw.(type) {
|
||||
case *v1.ConfigMap:
|
||||
return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace)
|
||||
case *v1.Secret:
|
||||
return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Update function to add an old object and a new object to the queue in case of updating a resource
|
||||
func (c *Controller) Update(old interface{}, new interface{}) {
|
||||
c.queue.Add(handler.ResourceUpdatedHandler{
|
||||
Resource: new,
|
||||
OldResource: old,
|
||||
})
|
||||
if !c.resourceInIgnoredNamespace(new) {
|
||||
c.queue.Add(handler.ResourceUpdatedHandler{
|
||||
Resource: new,
|
||||
OldResource: old,
|
||||
Collectors: c.collectors,
|
||||
Recorder: c.recorder,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Delete function to add an object to the queue in case of deleting a resource
|
||||
func (c *Controller) Delete(old interface{}) {
|
||||
logrus.Infof("Resource deletion has been detected but no further implementation found to take action")
|
||||
// Todo: Any future delete event can be handled here
|
||||
}
|
||||
|
||||
//Run function for controller which handles the queue
|
||||
// Run function for controller which handles the queue
|
||||
func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
|
||||
defer runtime.HandleCrash()
|
||||
|
||||
@@ -91,6 +134,9 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
|
||||
}
|
||||
|
||||
func (c *Controller) runWorker() {
|
||||
// At this point the controller is fully initialized and we can start processing the resources
|
||||
controllerInitialized = true
|
||||
|
||||
for c.processNextItem() {
|
||||
}
|
||||
}
|
||||
@@ -125,7 +171,7 @@ func (c *Controller) handleErr(err error, key interface{}) {
|
||||
|
||||
// This controller retries 5 times if something goes wrong. After that, it stops trying.
|
||||
if c.queue.NumRequeues(key) < 5 {
|
||||
logrus.Errorf("Error syncing events %v: %v", key, err)
|
||||
logrus.Errorf("Error syncing events: %v", err)
|
||||
|
||||
// Re-enqueue the key rate limited. Based on the rate limiter on the
|
||||
// queue and the re-enqueue history, the key will be processed later again.
|
||||
|
||||
File diff suppressed because it is too large
@@ -2,17 +2,41 @@ package handler
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/metrics"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/tools/record"
|
||||
)
|
||||
|
||||
// ResourceCreatedHandler contains new objects
|
||||
type ResourceCreatedHandler struct {
|
||||
Resource interface{}
|
||||
Resource interface{}
|
||||
Collectors metrics.Collectors
|
||||
Recorder record.EventRecorder
|
||||
}
|
||||
|
||||
// Handle processes the newly created resource
|
||||
func (r ResourceCreatedHandler) Handle() error {
|
||||
if r.Resource == nil {
|
||||
logrus.Errorf("Resource creation handler received nil resource")
|
||||
} else {
|
||||
config, _ := r.GetConfig()
|
||||
// process resource based on its type
|
||||
return doRollingUpgrade(config, r.Collectors, r.Recorder)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetConfig gets configurations containing SHA, annotations, namespace and resource name
|
||||
func (r ResourceCreatedHandler) GetConfig() (util.Config, string) {
|
||||
var oldSHAData string
|
||||
var config util.Config
|
||||
if _, ok := r.Resource.(*v1.ConfigMap); ok {
|
||||
config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
|
||||
} else if _, ok := r.Resource.(*v1.Secret); ok {
|
||||
config = util.GetSecretConfig(r.Resource.(*v1.Secret))
|
||||
} else {
|
||||
logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
|
||||
}
|
||||
return config, oldSHAData
|
||||
}
|
||||
|
||||
@@ -1,6 +1,11 @@
package handler

import (
	"github.com/stakater/Reloader/internal/pkg/util"
)

// ResourceHandler handles the creation and update of resources
type ResourceHandler interface {
	Handle() error
	GetConfig() (util.Config, string)
}
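Both handlers in this diff satisfy this small interface, which is what the controller's work queue traffics in. Below is a brief assumed sketch of a worker draining the queue through the interface; the function name is illustrative and not lifted from the controller code.

```go
package main

import (
	"log"

	"github.com/stakater/Reloader/internal/pkg/handler"
)

// processItem only needs the ResourceHandler contract: fetch the derived
// config for logging and let the handler decide whether to roll anything.
func processItem(h handler.ResourceHandler) error {
	config, oldSHA := h.GetConfig()
	log.Printf("handling %s in namespace %s (previous SHA %q)", config.ResourceName, config.Namespace, oldSHA)
	return h.Handle()
}

func main() {
	_ = processItem // wired up by the controller's worker loop in practice
}
```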
@@ -1,23 +1,19 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/callbacks"
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/crypto"
|
||||
"github.com/stakater/Reloader/internal/pkg/metrics"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/tools/record"
|
||||
)
|
||||
|
||||
// ResourceUpdatedHandler contains updated objects
|
||||
type ResourceUpdatedHandler struct {
|
||||
Resource interface{}
|
||||
OldResource interface{}
|
||||
Collectors metrics.Collectors
|
||||
Recorder record.EventRecorder
|
||||
}
|
||||
|
||||
// Handle processes the updated resource
|
||||
@@ -25,162 +21,27 @@ func (r ResourceUpdatedHandler) Handle() error {
|
||||
if r.Resource == nil || r.OldResource == nil {
|
||||
logrus.Errorf("Resource update handler received nil resource")
|
||||
} else {
|
||||
config, envVarPostfix, oldSHAData := getConfig(r)
|
||||
config, oldSHAData := r.GetConfig()
|
||||
if config.SHAValue != oldSHAData {
|
||||
logrus.Infof("Changes detected in %s of type '%s' in namespace: %s", config.ResourceName, envVarPostfix, config.Namespace)
|
||||
// process resource based on its type
|
||||
rollingUpgrade(r, config, envVarPostfix, callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDeploymentItems,
|
||||
ContainersFunc: callbacks.GetDeploymentContainers,
|
||||
UpdateFunc: callbacks.UpdateDeployment,
|
||||
ResourceType: "Deployment",
|
||||
})
|
||||
rollingUpgrade(r, config, envVarPostfix, callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDaemonSetItems,
|
||||
ContainersFunc: callbacks.GetDaemonSetContainers,
|
||||
UpdateFunc: callbacks.UpdateDaemonSet,
|
||||
ResourceType: "DaemonSet",
|
||||
})
|
||||
rollingUpgrade(r, config, envVarPostfix, callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetStatefulSetItems,
|
||||
ContainersFunc: callbacks.GetStatefulsetContainers,
|
||||
UpdateFunc: callbacks.UpdateStatefulset,
|
||||
ResourceType: "StatefulSet",
|
||||
})
|
||||
return doRollingUpgrade(config, r.Collectors, r.Recorder)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func rollingUpgrade(r ResourceUpdatedHandler, config util.Config, envarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) {
|
||||
client, err := kube.GetClient()
|
||||
if err != nil {
|
||||
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
|
||||
}
|
||||
|
||||
err = PerformRollingUpgrade(client, config, envarPostfix, upgradeFuncs)
|
||||
if err != nil {
|
||||
logrus.Errorf("Rolling upgrade for %s failed with error = %v", config.ResourceName, err)
|
||||
}
|
||||
}
|
||||
|
||||
func getConfig(r ResourceUpdatedHandler) (util.Config, string, string) {
|
||||
var oldSHAData, envVarPostfix string
|
||||
// GetConfig gets configurations containing SHA, annotations, namespace and resource name
|
||||
func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
|
||||
var oldSHAData string
|
||||
var config util.Config
|
||||
if _, ok := r.Resource.(*v1.ConfigMap); ok {
|
||||
oldSHAData = getSHAfromConfigmap(r.OldResource.(*v1.ConfigMap).Data)
|
||||
config = getConfigmapConfig(r)
|
||||
envVarPostfix = constants.ConfigmapEnvVarPostfix
|
||||
oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
|
||||
config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
|
||||
} else if _, ok := r.Resource.(*v1.Secret); ok {
|
||||
oldSHAData = getSHAfromSecret(r.OldResource.(*v1.Secret).Data)
|
||||
config = getSecretConfig(r)
|
||||
envVarPostfix = constants.SecretEnvVarPostfix
|
||||
oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)
|
||||
config = util.GetSecretConfig(r.Resource.(*v1.Secret))
|
||||
} else {
|
||||
logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
|
||||
}
|
||||
return config, envVarPostfix, oldSHAData
|
||||
}
|
||||
|
||||
func getConfigmapConfig(r ResourceUpdatedHandler) util.Config {
|
||||
configmap := r.Resource.(*v1.ConfigMap)
|
||||
return util.Config{
|
||||
Namespace: configmap.Namespace,
|
||||
ResourceName: configmap.Name,
|
||||
Annotation: constants.ConfigmapUpdateOnChangeAnnotation,
|
||||
SHAValue: getSHAfromConfigmap(configmap.Data),
|
||||
}
|
||||
}
|
||||
|
||||
func getSecretConfig(r ResourceUpdatedHandler) util.Config {
|
||||
secret := r.Resource.(*v1.Secret)
|
||||
return util.Config{
|
||||
Namespace: secret.Namespace,
|
||||
ResourceName: secret.Name,
|
||||
Annotation: constants.SecretUpdateOnChangeAnnotation,
|
||||
SHAValue: getSHAfromSecret(secret.Data),
|
||||
}
|
||||
}
|
||||
|
||||
// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
|
||||
func PerformRollingUpgrade(client kubernetes.Interface, config util.Config, envarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) error {
|
||||
items := upgradeFuncs.ItemsFunc(client, config.Namespace)
|
||||
var err error
|
||||
for _, i := range items {
|
||||
containers := upgradeFuncs.ContainersFunc(i)
|
||||
resourceName := util.ToObjectMeta(i).Name
|
||||
// find correct annotation and update the resource
|
||||
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
|
||||
if annotationValue != "" {
|
||||
values := strings.Split(annotationValue, ",")
|
||||
for _, value := range values {
|
||||
if value == config.ResourceName {
|
||||
updated := updateContainers(containers, value, config.SHAValue, envarPostfix)
|
||||
if !updated {
|
||||
logrus.Warnf("Rolling upgrade failed because no container found to add environment variable in %s of type %s in namespace: %s", resourceName, upgradeFuncs.ResourceType, config.Namespace)
|
||||
} else {
|
||||
err = upgradeFuncs.UpdateFunc(client, config.Namespace, i)
|
||||
if err != nil {
|
||||
logrus.Errorf("Update for %s of type %s in namespace %s failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
|
||||
} else {
|
||||
logrus.Infof("Updated %s of type %s in namespace: %s ", resourceName, upgradeFuncs.ResourceType, config.Namespace)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func updateContainers(containers []v1.Container, annotationValue string, shaData string, envarPostfix string) bool {
|
||||
updated := false
|
||||
envar := constants.EnvVarPrefix + util.ConvertToEnvVarName(annotationValue)+ "_" + envarPostfix
|
||||
for i := range containers {
|
||||
envs := containers[i].Env
|
||||
|
||||
//update if env var exists
|
||||
updated = updateEnvVar(envs, envar, shaData)
|
||||
|
||||
// if no existing env var exists lets create one
|
||||
if !updated {
|
||||
e := v1.EnvVar{
|
||||
Name: envar,
|
||||
Value: shaData,
|
||||
}
|
||||
containers[i].Env = append(containers[i].Env, e)
|
||||
updated = true
|
||||
}
|
||||
}
|
||||
return updated
|
||||
}
|
||||
|
||||
func updateEnvVar(envs []v1.EnvVar, envar string, shaData string) bool {
|
||||
for j := range envs {
|
||||
if envs[j].Name == envar {
|
||||
if envs[j].Value != shaData {
|
||||
envs[j].Value = shaData
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getSHAfromConfigmap(data map[string]string) string {
|
||||
values := []string{}
|
||||
for k, v := range data {
|
||||
values = append(values, k+"="+v)
|
||||
}
|
||||
sort.Strings(values)
|
||||
return crypto.GenerateSHA(strings.Join(values, ";"))
|
||||
}
|
||||
|
||||
func getSHAfromSecret(data map[string][]byte) string {
|
||||
values := []string{}
|
||||
for k, v := range data {
|
||||
values = append(values, k+"="+string(v[:]))
|
||||
}
|
||||
sort.Strings(values)
|
||||
return crypto.GenerateSHA(strings.Join(values, ";"))
|
||||
return config, oldSHAData
|
||||
}
|
||||
|
||||
@@ -1,315 +0,0 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/callbacks"
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/testutil"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
testclient "k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
var (
|
||||
client = testclient.NewSimpleClientset()
|
||||
namespace = "test-handler-" + testutil.RandSeq(5)
|
||||
configmapName = "testconfigmap-handler-" + testutil.RandSeq(5)
|
||||
secretName = "testsecret-handler-" + testutil.RandSeq(5)
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
||||
// Creating namespace
|
||||
testutil.CreateNamespace(namespace, client)
|
||||
|
||||
logrus.Infof("Setting up the test resources")
|
||||
setup()
|
||||
|
||||
logrus.Infof("Running Testcases")
|
||||
retCode := m.Run()
|
||||
|
||||
logrus.Infof("tearing down the test resources")
|
||||
teardown()
|
||||
|
||||
os.Exit(retCode)
|
||||
}
|
||||
|
||||
func setup() {
|
||||
// Creating configmap
|
||||
_, err := testutil.CreateConfigMap(client, namespace, configmapName, "www.google.com")
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in configmap creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating secret
|
||||
data := "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
|
||||
_, err = testutil.CreateSecret(client, namespace, secretName, data)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in secret creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating Deployment with configmap
|
||||
_, err = testutil.CreateDeployment(client, configmapName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in Deployment with configmap creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating Deployment with secret
|
||||
_, err = testutil.CreateDeployment(client, secretName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in Deployment with secret creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating DaemonSet with configmap
|
||||
_, err = testutil.CreateDaemonSet(client, configmapName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in DaemonSet with configmap creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating DaemonSet with secret
|
||||
_, err = testutil.CreateDaemonSet(client, secretName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in DaemonSet with secret creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating StatefulSet with configmap
|
||||
_, err = testutil.CreateStatefulSet(client, configmapName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in StatefulSet with configmap creation: %v", err)
|
||||
}
|
||||
|
||||
// Creating StatefulSet with secret
|
||||
_, err = testutil.CreateStatefulSet(client, secretName, namespace)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error in StatefulSet with secret creation: %v", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func teardown() {
|
||||
// Deleting Deployment with configmap
|
||||
deploymentError := testutil.DeleteDeployment(client, namespace, configmapName)
|
||||
if deploymentError != nil {
|
||||
logrus.Errorf("Error while deleting deployment with configmap %v", deploymentError)
|
||||
}
|
||||
|
||||
// Deleting Deployment with secret
|
||||
deploymentError = testutil.DeleteDeployment(client, namespace, secretName)
|
||||
if deploymentError != nil {
|
||||
logrus.Errorf("Error while deleting deployment with secret %v", deploymentError)
|
||||
}
|
||||
|
||||
// Deleting DaemonSet with configmap
|
||||
daemonSetError := testutil.DeleteDaemonSet(client, namespace, configmapName)
|
||||
if daemonSetError != nil {
|
||||
logrus.Errorf("Error while deleting daemonSet with configmap %v", daemonSetError)
|
||||
}
|
||||
|
||||
// Deleting Deployment with secret
|
||||
daemonSetError = testutil.DeleteDaemonSet(client, namespace, secretName)
|
||||
if daemonSetError != nil {
|
||||
logrus.Errorf("Error while deleting daemonSet with secret %v", daemonSetError)
|
||||
}
|
||||
|
||||
// Deleting StatefulSet with configmap
|
||||
statefulSetError := testutil.DeleteStatefulSet(client, namespace, configmapName)
|
||||
if statefulSetError != nil {
|
||||
logrus.Errorf("Error while deleting statefulSet with configmap %v", statefulSetError)
|
||||
}
|
||||
|
||||
// Deleting Deployment with secret
|
||||
statefulSetError = testutil.DeleteStatefulSet(client, namespace, secretName)
|
||||
if statefulSetError != nil {
|
||||
logrus.Errorf("Error while deleting statefulSet with secret %v", statefulSetError)
|
||||
}
|
||||
|
||||
// Deleting Configmap
|
||||
err := testutil.DeleteConfigMap(client, namespace, configmapName)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error while deleting the configmap %v", err)
|
||||
}
|
||||
|
||||
// Deleting Secret
|
||||
err = testutil.DeleteSecret(client, namespace, secretName)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error while deleting the secret %v", err)
|
||||
}
|
||||
|
||||
// Deleting namespace
|
||||
testutil.DeleteNamespace(namespace, client)
|
||||
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForDeploymentWithConfigmap(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, configmapName, "www.stakater.com")
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.ConfigmapUpdateOnChangeAnnotation,
|
||||
}
|
||||
deploymentFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDeploymentItems,
|
||||
ContainersFunc: callbacks.GetDeploymentContainers,
|
||||
UpdateFunc: callbacks.UpdateDeployment,
|
||||
ResourceType: "Deployment",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for Deployment with Configmap")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying deployment update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
|
||||
if !updated {
|
||||
t.Errorf("Deployment was not updated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForDeploymentWithSecret(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy")
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: secretName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.SecretUpdateOnChangeAnnotation,
|
||||
}
|
||||
deploymentFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDeploymentItems,
|
||||
ContainersFunc: callbacks.GetDeploymentContainers,
|
||||
UpdateFunc: callbacks.UpdateDeployment,
|
||||
ResourceType: "Deployment",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.SecretEnvVarPostfix, deploymentFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for Deployment with Secret")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying deployment update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, deploymentFuncs)
|
||||
if !updated {
|
||||
t.Errorf("Deployment was not updated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForDaemonSetWithConfigmap(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.facebook.com")
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.ConfigmapUpdateOnChangeAnnotation,
|
||||
}
|
||||
daemonSetFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDaemonSetItems,
|
||||
ContainersFunc: callbacks.GetDaemonSetContainers,
|
||||
UpdateFunc: callbacks.UpdateDaemonSet,
|
||||
ResourceType: "DaemonSet",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for DaemonSet with configmap")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying daemonSet update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, daemonSetFuncs)
|
||||
if !updated {
|
||||
t.Errorf("DaemonSet was not updated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForDaemonSetWithSecret(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LmZhY2Vib29rLmNvbQ==")
|
||||
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: secretName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.SecretUpdateOnChangeAnnotation,
|
||||
}
|
||||
daemonSetFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDaemonSetItems,
|
||||
ContainersFunc: callbacks.GetDaemonSetContainers,
|
||||
UpdateFunc: callbacks.UpdateDaemonSet,
|
||||
ResourceType: "DaemonSet",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for DaemonSet with secret")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying daemonSet update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, daemonSetFuncs)
|
||||
if !updated {
|
||||
t.Errorf("DaemonSet was not updated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForStatefulSetWithConfigmap(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.twitter.com")
|
||||
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.ConfigmapUpdateOnChangeAnnotation,
|
||||
}
|
||||
statefulSetFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetStatefulSetItems,
|
||||
ContainersFunc: callbacks.GetStatefulsetContainers,
|
||||
UpdateFunc: callbacks.UpdateStatefulset,
|
||||
ResourceType: "StatefulSet",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for StatefulSet with configmap")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying statefulSet update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.ConfigmapEnvVarPostfix, statefulSetFuncs)
|
||||
if !updated {
|
||||
t.Errorf("StatefulSet was not updated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollingUpgradeForStatefulSetWithSecret(t *testing.T) {
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, "d3d3LnR3aXR0ZXIuY29t")
|
||||
|
||||
config := util.Config{
|
||||
Namespace: namespace,
|
||||
ResourceName: secretName,
|
||||
SHAValue: shaData,
|
||||
Annotation: constants.SecretUpdateOnChangeAnnotation,
|
||||
}
|
||||
statefulSetFuncs := callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetStatefulSetItems,
|
||||
ContainersFunc: callbacks.GetStatefulsetContainers,
|
||||
UpdateFunc: callbacks.UpdateStatefulset,
|
||||
ResourceType: "StatefulSet",
|
||||
}
|
||||
|
||||
err := PerformRollingUpgrade(client, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
|
||||
time.Sleep(5 * time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("Rolling upgrade failed for StatefulSet with secret")
|
||||
}
|
||||
|
||||
logrus.Infof("Verifying statefulSet update")
|
||||
updated := testutil.VerifyResourceUpdate(client, config, constants.SecretEnvVarPostfix, statefulSetFuncs)
|
||||
if !updated {
|
||||
t.Errorf("StatefulSet was not updated")
|
||||
}
|
||||
}
|
||||
internal/pkg/handler/upgrade.go (new file, 426 lines)
@@ -0,0 +1,426 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/sirupsen/logrus"
|
||||
alert "github.com/stakater/Reloader/internal/pkg/alerts"
|
||||
"github.com/stakater/Reloader/internal/pkg/callbacks"
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/metrics"
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/tools/record"
|
||||
)
|
||||
|
||||
// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
|
||||
func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
|
||||
return callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDeploymentItems,
|
||||
AnnotationsFunc: callbacks.GetDeploymentAnnotations,
|
||||
PodAnnotationsFunc: callbacks.GetDeploymentPodAnnotations,
|
||||
ContainersFunc: callbacks.GetDeploymentContainers,
|
||||
InitContainersFunc: callbacks.GetDeploymentInitContainers,
|
||||
UpdateFunc: callbacks.UpdateDeployment,
|
||||
VolumesFunc: callbacks.GetDeploymentVolumes,
|
||||
ResourceType: "Deployment",
|
||||
}
|
||||
}
|
||||
|
||||
// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset
|
||||
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
|
||||
return callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDaemonSetItems,
|
||||
AnnotationsFunc: callbacks.GetDaemonSetAnnotations,
|
||||
PodAnnotationsFunc: callbacks.GetDaemonSetPodAnnotations,
|
||||
ContainersFunc: callbacks.GetDaemonSetContainers,
|
||||
InitContainersFunc: callbacks.GetDaemonSetInitContainers,
|
||||
UpdateFunc: callbacks.UpdateDaemonSet,
|
||||
VolumesFunc: callbacks.GetDaemonSetVolumes,
|
||||
ResourceType: "DaemonSet",
|
||||
}
|
||||
}
|
||||
|
||||
// GetStatefulSetRollingUpgradeFuncs returns all callback funcs for a statefulSet
|
||||
func GetStatefulSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
|
||||
return callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetStatefulSetItems,
|
||||
AnnotationsFunc: callbacks.GetStatefulSetAnnotations,
|
||||
PodAnnotationsFunc: callbacks.GetStatefulSetPodAnnotations,
|
||||
ContainersFunc: callbacks.GetStatefulSetContainers,
|
||||
InitContainersFunc: callbacks.GetStatefulSetInitContainers,
|
||||
UpdateFunc: callbacks.UpdateStatefulSet,
|
||||
VolumesFunc: callbacks.GetStatefulSetVolumes,
|
||||
ResourceType: "StatefulSet",
|
||||
}
|
||||
}
|
||||
|
||||
// GetDeploymentConfigRollingUpgradeFuncs returns all callback funcs for a deploymentConfig
|
||||
func GetDeploymentConfigRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
|
||||
return callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetDeploymentConfigItems,
|
||||
AnnotationsFunc: callbacks.GetDeploymentConfigAnnotations,
|
||||
PodAnnotationsFunc: callbacks.GetDeploymentConfigPodAnnotations,
|
||||
ContainersFunc: callbacks.GetDeploymentConfigContainers,
|
||||
InitContainersFunc: callbacks.GetDeploymentConfigInitContainers,
|
||||
UpdateFunc: callbacks.UpdateDeploymentConfig,
|
||||
VolumesFunc: callbacks.GetDeploymentConfigVolumes,
|
||||
ResourceType: "DeploymentConfig",
|
||||
}
|
||||
}
|
||||
|
||||
// GetArgoRolloutRollingUpgradeFuncs returns all callback funcs for a rollout
|
||||
func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
|
||||
return callbacks.RollingUpgradeFuncs{
|
||||
ItemsFunc: callbacks.GetRolloutItems,
|
||||
AnnotationsFunc: callbacks.GetRolloutAnnotations,
|
||||
PodAnnotationsFunc: callbacks.GetRolloutPodAnnotations,
|
||||
ContainersFunc: callbacks.GetRolloutContainers,
|
||||
InitContainersFunc: callbacks.GetRolloutInitContainers,
|
||||
UpdateFunc: callbacks.UpdateRollout,
|
||||
VolumesFunc: callbacks.GetRolloutVolumes,
|
||||
ResourceType: "Rollout",
|
||||
}
|
||||
}
|
||||
|
||||
func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder) error {
|
||||
clients := kube.GetClients()
|
||||
|
||||
err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors, recorder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if kube.IsOpenshift {
|
||||
err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors, recorder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if options.IsArgoRollouts == "true" {
|
||||
err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder) error {
|
||||
|
||||
err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors, recorder)
|
||||
if err != nil {
|
||||
logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
|
||||
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder) error {
|
||||
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
|
||||
|
||||
for _, i := range items {
|
||||
// find correct annotation and update the resource
|
||||
annotations := upgradeFuncs.AnnotationsFunc(i)
|
||||
annotationValue, found := annotations[config.Annotation]
|
||||
searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
|
||||
reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
|
||||
if !found && !foundAuto && !foundSearchAnn {
|
||||
annotations = upgradeFuncs.PodAnnotationsFunc(i)
|
||||
annotationValue = annotations[config.Annotation]
|
||||
searchAnnotationValue = annotations[options.AutoSearchAnnotation]
|
||||
reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
|
||||
}
|
||||
result := constants.NotUpdated
|
||||
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
|
||||
if err == nil && reloaderEnabled {
|
||||
result = invokeReloadStrategy(upgradeFuncs, i, config, true)
|
||||
}
|
||||
|
||||
if result != constants.Updated && annotationValue != "" {
|
||||
values := strings.Split(annotationValue, ",")
|
||||
for _, value := range values {
|
||||
value = strings.TrimSpace(value)
|
||||
re := regexp.MustCompile("^" + value + "$")
|
||||
if re.Match([]byte(config.ResourceName)) {
|
||||
result = invokeReloadStrategy(upgradeFuncs, i, config, false)
|
||||
if result == constants.Updated {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if result != constants.Updated && searchAnnotationValue == "true" {
|
||||
matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
|
||||
if matchAnnotationValue == "true" {
|
||||
result = invokeReloadStrategy(upgradeFuncs, i, config, true)
|
||||
}
|
||||
}
|
||||
|
||||
if result == constants.Updated {
|
||||
accessor, err := meta.Accessor(i)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resourceName := accessor.GetName()
|
||||
err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
|
||||
logrus.Errorf(message)
|
||||
collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
|
||||
if recorder != nil {
|
||||
recorder.Event(i, v1.EventTypeWarning, "ReloadFail", message)
|
||||
}
|
||||
return err
|
||||
} else {
|
||||
message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
|
||||
message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
|
||||
logrus.Infof(message)
|
||||
collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
|
||||
alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
|
||||
if recorder != nil {
|
||||
recorder.Event(i, v1.EventTypeNormal, "Reloaded", message)
|
||||
}
|
||||
if ok && alert_on_reload == "true" {
|
||||
msg := fmt.Sprintf(
|
||||
"Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
|
||||
config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
|
||||
alert.SendWebhookAlert(msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
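The annotation matching above treats each comma-separated entry in the reload annotation as an anchored regular expression tested against the ConfigMap/Secret name. A standalone sketch of that matching logic (function name and sample values are illustrative only):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matches mirrors how PerformRollingUpgrade compares the annotation value
// against config.ResourceName: split on commas, trim spaces, anchor with ^...$.
func matches(annotationValue, resourceName string) bool {
	for _, v := range strings.Split(annotationValue, ",") {
		v = strings.TrimSpace(v)
		if regexp.MustCompile("^" + v + "$").MatchString(resourceName) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(matches("app-config, cache-.*", "cache-settings")) // true
	fmt.Println(matches("app-config", "other-config"))             // false
}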
|
||||
func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
|
||||
for i := range volumes {
|
||||
if mountType == constants.ConfigmapEnvVarPostfix {
|
||||
if volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
|
||||
return volumes[i].Name
|
||||
}
|
||||
|
||||
if volumes[i].Projected != nil {
|
||||
for j := range volumes[i].Projected.Sources {
|
||||
if volumes[i].Projected.Sources[j].ConfigMap != nil && volumes[i].Projected.Sources[j].ConfigMap.Name == volumeName {
|
||||
return volumes[i].Name
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if mountType == constants.SecretEnvVarPostfix {
|
||||
if volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
|
||||
return volumes[i].Name
|
||||
}
|
||||
|
||||
if volumes[i].Projected != nil {
|
||||
for j := range volumes[i].Projected.Sources {
|
||||
if volumes[i].Projected.Sources[j].Secret != nil && volumes[i].Projected.Sources[j].Secret.Name == volumeName {
|
||||
return volumes[i].Name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func getContainerWithVolumeMount(containers []v1.Container, volumeMountName string) *v1.Container {
|
||||
for i := range containers {
|
||||
volumeMounts := containers[i].VolumeMounts
|
||||
for j := range volumeMounts {
|
||||
if volumeMounts[j].Name == volumeMountName {
|
||||
return &containers[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getContainerWithEnvReference(containers []v1.Container, resourceName string, resourceType string) *v1.Container {
|
||||
for i := range containers {
|
||||
envs := containers[i].Env
|
||||
for j := range envs {
|
||||
envVarSource := envs[j].ValueFrom
|
||||
if envVarSource != nil {
|
||||
if resourceType == constants.SecretEnvVarPostfix && envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.LocalObjectReference.Name == resourceName {
|
||||
return &containers[i]
|
||||
} else if resourceType == constants.ConfigmapEnvVarPostfix && envVarSource.ConfigMapKeyRef != nil && envVarSource.ConfigMapKeyRef.LocalObjectReference.Name == resourceName {
|
||||
return &containers[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
envsFrom := containers[i].EnvFrom
|
||||
for j := range envsFrom {
|
||||
if resourceType == constants.SecretEnvVarPostfix && envsFrom[j].SecretRef != nil && envsFrom[j].SecretRef.LocalObjectReference.Name == resourceName {
|
||||
return &containers[i]
|
||||
} else if resourceType == constants.ConfigmapEnvVarPostfix && envsFrom[j].ConfigMapRef != nil && envsFrom[j].ConfigMapRef.LocalObjectReference.Name == resourceName {
|
||||
return &containers[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) *v1.Container {
|
||||
volumes := upgradeFuncs.VolumesFunc(item)
|
||||
containers := upgradeFuncs.ContainersFunc(item)
|
||||
initContainers := upgradeFuncs.InitContainersFunc(item)
|
||||
var container *v1.Container
|
||||
// Get the volumeMountName to find volumeMount in container
|
||||
volumeMountName := getVolumeMountName(volumes, config.Type, config.ResourceName)
|
||||
// Get the container with mounted configmap/secret
|
||||
if volumeMountName != "" {
|
||||
container = getContainerWithVolumeMount(containers, volumeMountName)
|
||||
if container == nil && len(initContainers) > 0 {
|
||||
container = getContainerWithVolumeMount(initContainers, volumeMountName)
|
||||
if container != nil {
|
||||
// if configmap/secret is being used in init container then return the first Pod container to save reloader env
|
||||
return &containers[0]
|
||||
}
|
||||
} else if container != nil {
|
||||
return container
|
||||
}
|
||||
}
|
||||
|
||||
// Get the container with referenced secret or configmap as env var
|
||||
container = getContainerWithEnvReference(containers, config.ResourceName, config.Type)
|
||||
if container == nil && len(initContainers) > 0 {
|
||||
container = getContainerWithEnvReference(initContainers, config.ResourceName, config.Type)
|
||||
if container != nil {
|
||||
// if configmap/secret is being used in init container then return the first Pod container to save reloader env
|
||||
return &containers[0]
|
||||
}
|
||||
}
|
||||
|
||||
// Get the first container if the annotation is related to specified configmap or secret i.e. configmap.reloader.stakater.com/reload
|
||||
if container == nil && !autoReload {
|
||||
return &containers[0]
|
||||
}
|
||||
|
||||
return container
|
||||
}
|
||||
|
||||
func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
|
||||
if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
|
||||
return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
|
||||
}
|
||||
|
||||
return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
|
||||
}
|
||||
|
||||
func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
|
||||
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
|
||||
if container == nil {
|
||||
return constants.NoContainerFound
|
||||
}
|
||||
|
||||
// Generate reloaded annotations. Attaching this to the item's annotation will trigger a rollout
|
||||
// Note: the data on this struct is purely informational and is not used for future updates
|
||||
reloadSource := util.NewReloadSourceFromConfig(config, []string{container.Name})
|
||||
annotations, err := createReloadedAnnotations(&reloadSource)
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to create reloaded annotations for %s! error = %v", config.ResourceName, err)
|
||||
return constants.NotUpdated
|
||||
}
|
||||
|
||||
// Copy the all annotations to the item's annotations
|
||||
pa := upgradeFuncs.PodAnnotationsFunc(item)
|
||||
if pa == nil {
|
||||
return constants.NotUpdated
|
||||
}
|
||||
|
||||
for k, v := range annotations {
|
||||
pa[k] = v
|
||||
}
|
||||
|
||||
return constants.Updated
|
||||
}
|
||||
|
||||
func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, error) {
|
||||
if target == nil {
|
||||
return nil, errors.New("target is required")
|
||||
}
|
||||
|
||||
// Create a single "last-invokeReloadStrategy-from" annotation that stores metadata about the
|
||||
// resource that caused the last invokeReloadStrategy.
|
||||
// Intentionally only storing the last item in order to keep
|
||||
// the generated annotations as small as possible.
|
||||
annotations := make(map[string]string)
|
||||
lastReloadedResourceName := fmt.Sprintf("%s/%s",
|
||||
constants.ReloaderAnnotationPrefix,
|
||||
constants.LastReloadedFromAnnotation,
|
||||
)
|
||||
|
||||
lastReloadedResource, err := json.Marshal(target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
annotations[lastReloadedResourceName] = string(lastReloadedResource)
|
||||
return annotations, nil
|
||||
}
|
||||
|
||||
func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
|
||||
var result constants.Result
|
||||
envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
|
||||
container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
|
||||
|
||||
if container == nil {
|
||||
return constants.NoContainerFound
|
||||
}
|
||||
|
||||
//update if env var exists
|
||||
result = updateEnvVar(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue)
|
||||
|
||||
// if no existing env var exists lets create one
|
||||
if result == constants.NoEnvVarFound {
|
||||
e := v1.EnvVar{
|
||||
Name: envVar,
|
||||
Value: config.SHAValue,
|
||||
}
|
||||
container.Env = append(container.Env, e)
|
||||
result = constants.Updated
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func updateEnvVar(containers []v1.Container, envVar string, shaData string) constants.Result {
|
||||
for i := range containers {
|
||||
envs := containers[i].Env
|
||||
for j := range envs {
|
||||
if envs[j].Name == envVar {
|
||||
if envs[j].Value != shaData {
|
||||
envs[j].Value = shaData
|
||||
return constants.Updated
|
||||
}
|
||||
return constants.NotUpdated
|
||||
}
|
||||
}
|
||||
}
|
||||
return constants.NoEnvVarFound
|
||||
}
|
||||
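For the env-vars reload strategy, updateContainerEnvVars and updateEnvVar together either bump an existing hash variable or signal that a new one must be appended. A hedged usage sketch (the env var names below are invented; the real name is built from constants.EnvVarPrefix, the converted resource name, and the resource type):

package handler

import v1 "k8s.io/api/core/v1"

// exampleEnvVarUpdate is illustrative only. It shows the possible results of
// updateEnvVar: Updated (value changed), NotUpdated (value already current),
// or NoEnvVarFound (caller appends a new env var to the container).
func exampleEnvVarUpdate() {
	containers := []v1.Container{{
		Name: "app",
		Env:  []v1.EnvVar{{Name: "EXAMPLE_CONFIG_CONFIGMAP", Value: "old-sha"}},
	}}
	_ = updateEnvVar(containers, "EXAMPLE_CONFIG_CONFIGMAP", "new-sha") // constants.Updated
	_ = updateEnvVar(containers, "MISSING_VAR", "new-sha")              // constants.NoEnvVarFound
}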
internal/pkg/handler/upgrade_test.go (new file, 2650 lines; file diff suppressed because it is too large)
internal/pkg/leadership/leadership.go (new file, 110 lines)
@@ -0,0 +1,110 @@
|
||||
package leadership
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/controller"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/tools/leaderelection"
|
||||
"k8s.io/client-go/tools/leaderelection/resourcelock"
|
||||
|
||||
coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
|
||||
)
|
||||
|
||||
const healthPort string = ":9091"
|
||||
|
||||
var (
|
||||
// Used for liveness probe
|
||||
m sync.Mutex
|
||||
healthy bool = true
|
||||
)
|
||||
|
||||
func GetNewLock(client coordinationv1.CoordinationV1Interface, lockName, podname, namespace string) *resourcelock.LeaseLock {
|
||||
return &resourcelock.LeaseLock{
|
||||
LeaseMeta: v1.ObjectMeta{
|
||||
Name: lockName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Client: client,
|
||||
LockConfig: resourcelock.ResourceLockConfig{
|
||||
Identity: podname,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// RunLeaderElection runs leadership election. If an instance of the controller is the leader and stops leading, it will shut down.
|
||||
func RunLeaderElection(lock *resourcelock.LeaseLock, ctx context.Context, cancel context.CancelFunc, id string, controllers []*controller.Controller) {
|
||||
// Construct channels for the controllers to use
|
||||
var stopChannels []chan struct{}
|
||||
for i := 0; i < len(controllers); i++ {
|
||||
stop := make(chan struct{})
|
||||
stopChannels = append(stopChannels, stop)
|
||||
}
|
||||
|
||||
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
|
||||
Lock: lock,
|
||||
ReleaseOnCancel: true,
|
||||
LeaseDuration: 15 * time.Second,
|
||||
RenewDeadline: 10 * time.Second,
|
||||
RetryPeriod: 2 * time.Second,
|
||||
Callbacks: leaderelection.LeaderCallbacks{
|
||||
OnStartedLeading: func(c context.Context) {
|
||||
logrus.Info("became leader, starting controllers")
|
||||
runControllers(controllers, stopChannels)
|
||||
},
|
||||
OnStoppedLeading: func() {
|
||||
logrus.Info("no longer leader, shutting down")
|
||||
stopControllers(stopChannels)
|
||||
cancel()
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
healthy = false
|
||||
},
|
||||
OnNewLeader: func(current_id string) {
|
||||
if current_id == id {
|
||||
logrus.Info("still the leader!")
|
||||
return
|
||||
}
|
||||
logrus.Infof("new leader is %s", current_id)
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func runControllers(controllers []*controller.Controller, stopChannels []chan struct{}) {
|
||||
for i, c := range controllers {
|
||||
c := c
|
||||
go c.Run(1, stopChannels[i])
|
||||
}
|
||||
}
|
||||
|
||||
func stopControllers(stopChannels []chan struct{}) {
|
||||
for _, c := range stopChannels {
|
||||
close(c)
|
||||
}
|
||||
}
|
||||
|
||||
// Healthz serves the liveness probe endpoint. If leadership election is
|
||||
// enabled and a replica stops leading the liveness probe will fail and the
|
||||
// kubelet will restart the container.
|
||||
func Healthz() error {
|
||||
http.HandleFunc("/live", healthz)
|
||||
return http.ListenAndServe(healthPort, nil)
|
||||
}
|
||||
|
||||
func healthz(w http.ResponseWriter, req *http.Request) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
if healthy {
|
||||
if i, err := w.Write([]byte("alive")); err != nil {
|
||||
logrus.Infof("failed to write liveness response, wrote: %d bytes, got err: %s", i, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
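Putting the pieces together, a caller creates the lease lock, starts the election in a goroutine, and serves the liveness endpoint. The wiring below is a sketch under assumptions (in-cluster config and literal lock/pod names instead of the real constants):

package main

import (
	"context"

	"github.com/stakater/Reloader/internal/pkg/controller"
	"github.com/stakater/Reloader/internal/pkg/leadership"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func runWithLeaderElection(controllers []*controller.Controller) error {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return err
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(context.Background())
	// "reloader-lock" and "reloader-pod" are placeholders for illustration.
	lock := leadership.GetNewLock(client.CoordinationV1(), "reloader-lock", "reloader-pod", "default")
	go leadership.RunLeaderElection(lock, ctx, cancel, "reloader-pod", controllers)
	return leadership.Healthz() // blocks, serving /live on :9091
}

func main() {
	if err := runWithLeaderElection(nil); err != nil {
		panic(err)
	}
}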
internal/pkg/leadership/leadership_test.go (new file, 213 lines)
@@ -0,0 +1,213 @@
|
||||
package leadership
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/controller"
|
||||
"github.com/stakater/Reloader/internal/pkg/handler"
|
||||
"github.com/stakater/Reloader/internal/pkg/metrics"
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
"github.com/stakater/Reloader/internal/pkg/testutil"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
||||
testutil.CreateNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)
|
||||
|
||||
logrus.Infof("Running Testcases")
|
||||
retCode := m.Run()
|
||||
|
||||
testutil.DeleteNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)
|
||||
|
||||
os.Exit(retCode)
|
||||
}
|
||||
|
||||
func TestHealthz(t *testing.T) {
|
||||
request, err := http.NewRequest(http.MethodGet, "/live", nil)
|
||||
if err != nil {
|
||||
t.Fatalf(("failed to create request"))
|
||||
}
|
||||
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
healthz(response, request)
|
||||
got := response.Code
|
||||
want := 200
|
||||
|
||||
if got != want {
|
||||
t.Fatalf("got: %q, want: %q", got, want)
|
||||
}
|
||||
|
||||
// Have the liveness probe serve a 500
|
||||
healthy = false
|
||||
|
||||
request, err = http.NewRequest(http.MethodGet, "/live", nil)
|
||||
if err != nil {
|
||||
t.Fatalf(("failed to create request"))
|
||||
}
|
||||
|
||||
response = httptest.NewRecorder()
|
||||
|
||||
healthz(response, request)
|
||||
got = response.Code
|
||||
want = 500
|
||||
|
||||
if got != want {
|
||||
t.Fatalf("got: %q, want: %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRunLeaderElection validates that the liveness endpoint serves 500 when
|
||||
// leadership election fails
|
||||
func TestRunLeaderElection(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
|
||||
lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), constants.LockName, testutil.Pod, testutil.Namespace)
|
||||
|
||||
go RunLeaderElection(lock, ctx, cancel, testutil.Pod, []*controller.Controller{})
|
||||
|
||||
// Liveness probe should be serving OK
|
||||
request, err := http.NewRequest(http.MethodGet, "/live", nil)
|
||||
if err != nil {
|
||||
t.Fatalf(("failed to create request"))
|
||||
}
|
||||
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
healthz(response, request)
|
||||
got := response.Code
|
||||
want := 500
|
||||
|
||||
if got != want {
|
||||
t.Fatalf("got: %q, want: %q", got, want)
|
||||
}
|
||||
|
||||
// Cancel the leader election context, so leadership is released and
|
||||
// live endpoint serves 500
|
||||
cancel()
|
||||
|
||||
request, err = http.NewRequest(http.MethodGet, "/live", nil)
|
||||
if err != nil {
|
||||
t.Fatalf(("failed to create request"))
|
||||
}
|
||||
|
||||
response = httptest.NewRecorder()
|
||||
|
||||
healthz(response, request)
|
||||
got = response.Code
|
||||
want = 500
|
||||
|
||||
if got != want {
|
||||
t.Fatalf("got: %q, want: %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRunLeaderElectionWithControllers tests that leadership election works
|
||||
// with real controllers and that on context cancellation the controllers stop
|
||||
// running.
|
||||
func TestRunLeaderElectionWithControllers(t *testing.T) {
|
||||
t.Logf("Creating controller")
|
||||
var controllers []*controller.Controller
|
||||
for k := range kube.ResourceMap {
|
||||
c, err := controller.NewController(testutil.Clients.KubernetesClient, k, testutil.Namespace, []string{}, metrics.NewCollectors())
|
||||
if err != nil {
|
||||
logrus.Fatalf("%s", err)
|
||||
}
|
||||
|
||||
controllers = append(controllers, c)
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), fmt.Sprintf("%s-%d", constants.LockName, 1), testutil.Pod, testutil.Namespace)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
|
||||
// Start running leadership election, this also starts the controllers
|
||||
go RunLeaderElection(lock, ctx, cancel, testutil.Pod, controllers)
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// Create a configmap and a deployment, then update the configmap to trigger a reload
|
||||
configmapName := testutil.ConfigmapNamePrefix + "-update-" + testutil.RandSeq(5)
|
||||
configmapClient, err := testutil.CreateConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName, "www.google.com")
|
||||
if err != nil {
|
||||
t.Fatalf("Error while creating the configmap %v", err)
|
||||
}
|
||||
|
||||
// Creating deployment
|
||||
_, err = testutil.CreateDeployment(testutil.Clients.KubernetesClient, configmapName, testutil.Namespace, true)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in deployment creation: %v", err)
|
||||
}
|
||||
|
||||
// Updating configmap for first time
|
||||
updateErr := testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com")
|
||||
if updateErr != nil {
|
||||
t.Fatalf("Configmap was not updated")
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// Verifying deployment update
|
||||
logrus.Infof("Verifying pod envvars has been created")
|
||||
shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
|
||||
config := util.Config{
|
||||
Namespace: testutil.Namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
|
||||
}
|
||||
deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
|
||||
updated := testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
|
||||
if !updated {
|
||||
t.Fatalf("Deployment was not updated")
|
||||
}
|
||||
time.Sleep(testutil.SleepDuration)
|
||||
|
||||
// Cancel the leader election context, so leadership is released
|
||||
logrus.Info("shutting down controller from test")
|
||||
cancel()
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Updating configmap again
|
||||
updateErr = testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com/new")
|
||||
if updateErr != nil {
|
||||
t.Fatalf("Configmap was not updated")
|
||||
}
|
||||
|
||||
// Verifying that the deployment was not updated as leadership has been lost
|
||||
logrus.Infof("Verifying pod envvars has not been updated")
|
||||
shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
|
||||
config = util.Config{
|
||||
Namespace: testutil.Namespace,
|
||||
ResourceName: configmapName,
|
||||
SHAValue: shaData,
|
||||
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
|
||||
}
|
||||
deploymentFuncs = handler.GetDeploymentRollingUpgradeFuncs()
|
||||
updated = testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
|
||||
if updated {
|
||||
t.Fatalf("Deployment was updated")
|
||||
}
|
||||
|
||||
// Deleting deployment
|
||||
err = testutil.DeleteDeployment(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error while deleting the deployment %v", err)
|
||||
}
|
||||
|
||||
// Deleting configmap
|
||||
err = testutil.DeleteConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error while deleting the configmap %v", err)
|
||||
}
|
||||
time.Sleep(testutil.SleepDuration)
|
||||
}
|
||||
internal/pkg/metrics/prometheus.go (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/sirupsen/logrus"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type Collectors struct {
|
||||
Reloaded *prometheus.CounterVec
|
||||
}
|
||||
|
||||
func NewCollectors() Collectors {
|
||||
reloaded := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "reload_executed_total",
|
||||
Help: "Counter of reloads executed by Reloader.",
|
||||
},
|
||||
[]string{"success"},
|
||||
)
|
||||
|
||||
//set 0 as default value
|
||||
reloaded.With(prometheus.Labels{"success": "true"}).Add(0)
|
||||
reloaded.With(prometheus.Labels{"success": "false"}).Add(0)
|
||||
|
||||
return Collectors{
|
||||
Reloaded: reloaded,
|
||||
}
|
||||
}
|
||||
|
||||
func SetupPrometheusEndpoint() Collectors {
|
||||
collectors := NewCollectors()
|
||||
prometheus.MustRegister(collectors.Reloaded)
|
||||
|
||||
go func() {
|
||||
http.Handle("/metrics", promhttp.Handler())
|
||||
logrus.Fatal(http.ListenAndServe(":9090", nil))
|
||||
}()
|
||||
|
||||
return collectors
|
||||
}
|
||||
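SetupPrometheusEndpoint registers the counter and exposes /metrics on :9090; callers then record each reload outcome through the "success" label, as PerformRollingUpgrade does. A small usage sketch (assumed wiring, not taken from the diff):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/stakater/Reloader/internal/pkg/metrics"
)

func main() {
	// Registers reloader_reload_executed_total and starts the /metrics server.
	collectors := metrics.SetupPrometheusEndpoint()

	// Record one successful and one failed reload, matching the labels used
	// by PerformRollingUpgrade.
	collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
	collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
}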
internal/pkg/options/flags.go (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
package options
|
||||
|
||||
import "github.com/stakater/Reloader/internal/pkg/constants"
|
||||
|
||||
var (
|
||||
// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in
|
||||
// configmaps specified by name
|
||||
ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"
|
||||
// SecretUpdateOnChangeAnnotation is an annotation to detect changes in
|
||||
// secrets specified by name
|
||||
SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
|
||||
// ReloaderAutoAnnotation is an annotation to detect changes in secrets
|
||||
ReloaderAutoAnnotation = "reloader.stakater.com/auto"
|
||||
// AutoSearchAnnotation is an annotation to detect changes in
|
||||
// configmaps or triggers with the SearchMatchAnnotation
|
||||
AutoSearchAnnotation = "reloader.stakater.com/search"
|
||||
// SearchMatchAnnotation is an annotation to tag secrets to be found with
|
||||
// AutoSearchAnnotation
|
||||
SearchMatchAnnotation = "reloader.stakater.com/match"
|
||||
// LogFormat is the log format to use (json, or empty string for default)
|
||||
LogFormat = ""
|
||||
// IsArgoRollouts Adds support for argo rollouts
|
||||
IsArgoRollouts = "false"
|
||||
// ReloadStrategy Specify the update strategy
|
||||
ReloadStrategy = constants.EnvVarsReloadStrategy
|
||||
// ReloadOnCreate Adds support to watch create events
|
||||
ReloadOnCreate = "false"
|
||||
// EnableHA adds support for running multiple replicas via leadership election
|
||||
EnableHA = false
|
||||
)
|
||||
@@ -1,20 +1,28 @@
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
openshiftv1 "github.com/openshift/api/apps/v1"
|
||||
appsclient "github.com/openshift/client-go/apps/clientset/versioned"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stakater/Reloader/internal/pkg/callbacks"
|
||||
"github.com/stakater/Reloader/internal/pkg/constants"
|
||||
"github.com/stakater/Reloader/internal/pkg/crypto"
|
||||
"github.com/stakater/Reloader/internal/pkg/metrics"
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
"github.com/stakater/Reloader/internal/pkg/util"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
v1_beta1 "k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/api/extensions/v1beta1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
core_v1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
@@ -28,19 +36,24 @@ var (
|
||||
SecretResourceType = "secrets"
|
||||
)
|
||||
|
||||
func GetClient() *kubernetes.Clientset {
|
||||
newClient, err := kube.GetClient()
|
||||
if err != nil {
|
||||
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
|
||||
}
|
||||
return newClient
|
||||
}
|
||||
var (
|
||||
Clients = kube.GetClients()
|
||||
Pod = "test-reloader-" + RandSeq(5)
|
||||
Namespace = "test-reloader-" + RandSeq(5)
|
||||
ConfigmapNamePrefix = "testconfigmap-reloader"
|
||||
SecretNamePrefix = "testsecret-reloader"
|
||||
Data = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
|
||||
NewData = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
|
||||
UpdatedData = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
|
||||
Collectors = metrics.NewCollectors()
|
||||
SleepDuration = 3 * time.Second
|
||||
)
|
||||
|
||||
// CreateNamespace creates namespace for testing
|
||||
func CreateNamespace(namespace string, client kubernetes.Interface) {
|
||||
_, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
|
||||
_, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
logrus.Fatalf("Failed to create namespace for testing", err)
|
||||
logrus.Fatalf("Failed to create namespace for testing %v", err)
|
||||
} else {
|
||||
logrus.Infof("Creating namespace for testing = %s", namespace)
|
||||
}
|
||||
@@ -48,44 +61,163 @@ func CreateNamespace(namespace string, client kubernetes.Interface) {
|
||||
|
||||
// DeleteNamespace deletes namespace for testing
|
||||
func DeleteNamespace(namespace string, client kubernetes.Interface) {
|
||||
err := client.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{})
|
||||
err := client.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
logrus.Fatalf("Failed to delete namespace that was created for testing", err)
|
||||
logrus.Fatalf("Failed to delete namespace that was created for testing %v", err)
|
||||
} else {
|
||||
logrus.Infof("Deleting namespace for testing = %s", namespace)
|
||||
}
|
||||
}
|
||||
|
||||
// GetDeployment provides deployment for testing
|
||||
func GetDeployment(namespace string, deploymentName string) *v1beta1.Deployment {
|
||||
replicaset := int32(1)
|
||||
return &v1beta1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
Namespace: namespace,
|
||||
Labels: map[string]string{"firstLabel": "temp"},
|
||||
Annotations: map[string]string{
|
||||
constants.ConfigmapUpdateOnChangeAnnotation: deploymentName,
|
||||
constants.SecretUpdateOnChangeAnnotation: deploymentName},
|
||||
},
|
||||
Spec: v1beta1.DeploymentSpec{
|
||||
Replicas: &replicaset,
|
||||
Strategy: v1beta1.DeploymentStrategy{
|
||||
Type: v1beta1.RollingUpdateDeploymentStrategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"secondLabel": "temp"},
|
||||
func getObjectMeta(namespace string, name string, autoReload bool) metav1.ObjectMeta {
|
||||
return metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Labels: map[string]string{"firstLabel": "temp"},
|
||||
Annotations: getAnnotations(name, autoReload),
|
||||
}
|
||||
}
|
||||
|
||||
func getAnnotations(name string, autoReload bool) map[string]string {
|
||||
if autoReload {
|
||||
return map[string]string{
|
||||
options.ReloaderAutoAnnotation: "true"}
|
||||
}
|
||||
|
||||
return map[string]string{
|
||||
options.ConfigmapUpdateOnChangeAnnotation: name,
|
||||
options.SecretUpdateOnChangeAnnotation: name}
|
||||
}
|
||||
|
||||
func getEnvVarSources(name string) []v1.EnvFromSource {
return []v1.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
},
},
{
SecretRef: &v1.SecretEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
}
}

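// getVolumes returns plain and projected ConfigMap/Secret volumes backed by the resource with the given name.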
func getVolumes(name string) []v1.Volume {
return []v1.Volume{
{
Name: "projectedconfigmap",
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Image: "tutum/hello-world",
Name: deploymentName,
Env: []v1.EnvVar{
{
Name: "BUCKET_NAME",
Value: "test",
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
{
Name: "projectedsecret",
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
{
Name: "configmap",
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
{
Name: "secret",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
}
}

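// getVolumeMounts returns the mount points for the plain and projected ConfigMap/Secret volumes.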
func getVolumeMounts(name string) []v1.VolumeMount {
return []v1.VolumeMount{
{
MountPath: "etc/config",
Name: "configmap",
},
{
MountPath: "etc/sec",
Name: "secret",
},
{
MountPath: "etc/projectedconfig",
Name: "projectedconfigmap",
},
{
MountPath: "etc/projectedsec",
Name: "projectedsecret",
},
}
}

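// getPodTemplateSpecWithEnvVars returns a pod template whose container consumes the ConfigMap and Secret through individual environment variables.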
func getPodTemplateSpecWithEnvVars(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "tutum/hello-world",
Name: name,
Env: []v1.EnvVar{
{
Name: "BUCKET_NAME",
Value: "test",
},
{
Name: "CONFIGMAP_" + util.ConvertToEnvVarName(name),
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "test.url",
},
},
},
{
Name: "SECRET_" + util.ConvertToEnvVarName(name),
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "test.url",
},
},
},
@@ -96,78 +228,306 @@ func GetDeployment(namespace string, deploymentName string) *v1beta1.Deployment
}
}

// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *v1beta1.DaemonSet {
return &v1beta1.DaemonSet{
func getPodTemplateSpecWithEnvVarSources(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: daemonsetName,
Namespace: namespace,
Labels: map[string]string{"firstLabel": "temp"},
Annotations: map[string]string{
constants.ConfigmapUpdateOnChangeAnnotation: daemonsetName,
constants.SecretUpdateOnChangeAnnotation: daemonsetName},
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1beta1.DaemonSetSpec{
UpdateStrategy: v1beta1.DaemonSetUpdateStrategy{
Type: v1beta1.RollingUpdateDaemonSetStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "tutum/hello-world",
Name: name,
EnvFrom: getEnvVarSources(name),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
},
},
}
}

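// getPodTemplateSpecWithVolumes returns a pod template whose container mounts the ConfigMap and Secret volumes from getVolumes.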
func getPodTemplateSpecWithVolumes(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "tutum/hello-world",
Name: name,
Env: []v1.EnvVar{
{
Image: "tutum/hello-world",
Name: daemonsetName,
Env: []v1.EnvVar{
{
Name: "BUCKET_NAME",
Value: "test",
},
},
Name: "BUCKET_NAME",
Value: "test",
},
},
VolumeMounts: getVolumeMounts(name),
},
},
Volumes: getVolumes(name),
},
}
}

func getPodTemplateSpecWithInitContainer(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Image: "busybox",
Name: "busyBox",
VolumeMounts: getVolumeMounts(name),
},
},
Containers: []v1.Container{
{
Image: "tutum/hello-world",
Name: name,
Env: []v1.EnvVar{
{
Name: "BUCKET_NAME",
Value: "test",
},
},
},
},
Volumes: getVolumes(name),
},
}
}

func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec {
return v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Image: "busybox",
Name: "busyBox",
EnvFrom: getEnvVarSources(name),
},
},
Containers: []v1.Container{
{
Image: "tutum/hello-world",
Name: name,
Env: []v1.EnvVar{
{
Name: "BUCKET_NAME",
Value: "test",
},
},
},
},
},
}
}

// GetDeployment provides deployment for testing
func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithVolumes(deploymentName),
},
}
}

// GetDeploymentConfig provides deployment for testing
func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig {
replicaset := int32(1)
podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName)
return &openshiftv1.DeploymentConfig{
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false),
Spec: openshiftv1.DeploymentConfigSpec{
Replicas: replicaset,
Strategy: openshiftv1.DeploymentStrategy{
Type: openshiftv1.DeploymentStrategyTypeRolling,
},
Template: &podTemplateSpecWithVolume,
},
}
}

// GetDeploymentWithInitContainer provides deployment with init container and volumeMounts
func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithInitContainer(deploymentName),
},
}
}

// GetDeploymentWithInitContainerAndEnv provides deployment with init container and EnvSource
func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithInitContainerAndEnv(deploymentName),
},
}
}

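// GetDeploymentWithEnvVars provides deployment with env vars for testing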
func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithEnvVars(deploymentName),
},
}
}

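// GetDeploymentConfigWithEnvVars provides deploymentConfig with env vars for testing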
func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig {
replicaset := int32(1)
podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName)
return &openshiftv1.DeploymentConfig{
ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false),
Spec: openshiftv1.DeploymentConfigSpec{
Replicas: replicaset,
Strategy: openshiftv1.DeploymentStrategy{
Type: openshiftv1.DeploymentStrategyTypeRolling,
},
Template: &podTemplateSpecWithEnvVars,
},
}
}

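// GetDeploymentWithEnvVarSources provides deployment with envFrom sources for testing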
func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment {
replicaset := int32(1)
return &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, true),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithEnvVarSources(deploymentName),
},
}
}

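// GetDeploymentWithPodAnnotations provides deployment with reloader annotations on the pod template (and optionally on the deployment itself) for testing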
func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
replicaset := int32(1)
deployment := &appsv1.Deployment{
ObjectMeta: getObjectMeta(namespace, deploymentName, false),
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Replicas: &replicaset,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpecWithEnvVarSources(deploymentName),
},
}
if !both {
deployment.ObjectMeta.Annotations = nil
}
deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true)
return deployment
}

// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
ObjectMeta: getObjectMeta(namespace, daemonsetName, false),
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
Type: appsv1.RollingUpdateDaemonSetStrategyType,
},
Template: getPodTemplateSpecWithVolumes(daemonsetName),
},
}
}

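// GetDaemonSetWithEnvVars provides daemonset with env vars for testing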
func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
ObjectMeta: getObjectMeta(namespace, daemonSetName, true),
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
Type: appsv1.RollingUpdateDaemonSetStrategyType,
},
Template: getPodTemplateSpecWithEnvVars(daemonSetName),
},
}
}

// GetStatefulSet provides statefulset for testing
func GetStatefulSet(namespace string, statefulsetName string) *v1_beta1.StatefulSet {
return &v1_beta1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: statefulsetName,
Namespace: namespace,
Labels: map[string]string{"firstLabel": "temp"},
Annotations: map[string]string{
constants.ConfigmapUpdateOnChangeAnnotation: statefulsetName,
constants.SecretUpdateOnChangeAnnotation: statefulsetName},
func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSet {
return &appsv1.StatefulSet{
ObjectMeta: getObjectMeta(namespace, statefulsetName, false),
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
},
Template: getPodTemplateSpecWithVolumes(statefulsetName),
},
Spec: v1_beta1.StatefulSetSpec{
UpdateStrategy: v1_beta1.StatefulSetUpdateStrategy{
Type: v1_beta1.RollingUpdateStatefulSetStrategyType,
}
}

// GetStatefulSetWithEnvVar provides statefulset with env vars for testing
func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *appsv1.StatefulSet {
return &appsv1.StatefulSet{
ObjectMeta: getObjectMeta(namespace, statefulsetName, true),
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"secondLabel": "temp"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"secondLabel": "temp"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "tutum/hello-world",
Name: statefulsetName,
Env: []v1.EnvVar{
{
Name: "BUCKET_NAME",
Value: "test",
},
},
},
},
},
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
},
Template: getPodTemplateSpecWithEnvVars(statefulsetName),
},
}
}
@@ -220,12 +580,12 @@ func GetSecretWithUpdatedLabel(namespace string, secretName string, label string
}
}

// GetResourceSHA returns the SHA value of given environment variable
func GetResourceSHA(containers []v1.Container, envar string) string {
// GetResourceSHAFromEnvVar returns the SHA value of given environment variable
func GetResourceSHAFromEnvVar(containers []v1.Container, envVar string) string {
for i := range containers {
envs := containers[i].Env
for j := range envs {
if envs[j].Name == envar {
if envs[j].Name == envVar {
return envs[j].Value
}
}
@@ -233,7 +593,29 @@ func GetResourceSHA(containers []v1.Container, envar string) string {
return ""
}

//ConvertResourceToSHA generates SHA from secret or configmap data
// GetResourceSHAFromAnnotation returns the SHA value recorded in the last-reloaded-from pod annotation
func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
lastReloadedResourceName := fmt.Sprintf("%s/%s",
constants.ReloaderAnnotationPrefix,
constants.LastReloadedFromAnnotation,
)

annotationJson, ok := podAnnotations[lastReloadedResourceName]
if !ok {
return ""
}

var last util.ReloadSource
bytes := []byte(annotationJson)
err := json.Unmarshal(bytes, &last)
if err != nil {
return ""
}

return last.Hash
}

// ConvertResourceToSHA generates SHA from secret or configmap data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
values := []string{}
if resourceType == SecretResourceType {
@@ -255,8 +637,8 @@ func ConvertResourceToSHA(resourceType string, namespace string, resourceName st
func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) {
logrus.Infof("Creating configmap")
configmapClient := client.CoreV1().ConfigMaps(namespace)
_, err := configmapClient.Create(GetConfigmap(namespace, configmapName, data))
time.Sleep(10 * time.Second)
_, err := configmapClient.Create(context.TODO(), GetConfigmap(namespace, configmapName, data), metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return configmapClient, err
}

@@ -264,59 +646,148 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapNam
func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {
logrus.Infof("Creating secret")
secretClient := client.CoreV1().Secrets(namespace)
_, err := secretClient.Create(GetSecret(namespace, secretName, data))
time.Sleep(10 * time.Second)
_, err := secretClient.Create(context.TODO(), GetSecret(namespace, secretName, data), metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return secretClient, err
}

// CreateDeployment creates a deployment in given namespace and returns the Deployment
func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string) (*v1beta1.Deployment, error) {
func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.ExtensionsV1beta1().Deployments(namespace)
deployment, err := deploymentClient.Create(GetDeployment(namespace, deploymentName))
time.Sleep(10 * time.Second)
deploymentClient := client.AppsV1().Deployments(namespace)
var deploymentObj *appsv1.Deployment
if volumeMount {
deploymentObj = GetDeployment(namespace, deploymentName)
} else {
deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
}
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}

// CreateDeploymentConfig creates a deploymentConfig in given namespace and returns the DeploymentConfig
func CreateDeploymentConfig(client appsclient.Interface, deploymentName string, namespace string, volumeMount bool) (*openshiftv1.DeploymentConfig, error) {
logrus.Infof("Creating DeploymentConfig")
deploymentConfigsClient := client.AppsV1().DeploymentConfigs(namespace)
var deploymentConfigObj *openshiftv1.DeploymentConfig
if volumeMount {
deploymentConfigObj = GetDeploymentConfig(namespace, deploymentName)
} else {
deploymentConfigObj = GetDeploymentConfigWithEnvVars(namespace, deploymentName)
}
deploymentConfig, err := deploymentConfigsClient.Create(context.TODO(), deploymentConfigObj, metav1.CreateOptions{})
time.Sleep(5 * time.Second)
return deploymentConfig, err
}

// CreateDeploymentWithInitContainer creates a deployment in given namespace with init container and returns the Deployment
func CreateDeploymentWithInitContainer(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
var deploymentObj *appsv1.Deployment
if volumeMount {
deploymentObj = GetDeploymentWithInitContainer(namespace, deploymentName)
} else {
deploymentObj = GetDeploymentWithInitContainerAndEnv(namespace, deploymentName)
}
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}

// CreateDeploymentWithEnvVarSource creates a deployment in given namespace and returns the Deployment
func CreateDeploymentWithEnvVarSource(client kubernetes.Interface, deploymentName string, namespace string) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err

}

// CreateDeploymentWithPodAnnotations creates a deployment in given namespace and returns the Deployment
func CreateDeploymentWithPodAnnotations(client kubernetes.Interface, deploymentName string, namespace string, both bool) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithPodAnnotations(namespace, deploymentName, both)
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}

// CreateDeploymentWithEnvVarSourceAndAnnotations returns a deployment in given
// namespace with given annotations.
func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface, deploymentName string, namespace string, annotations map[string]string) (*appsv1.Deployment, error) {
logrus.Infof("Creating Deployment")
deploymentClient := client.AppsV1().Deployments(namespace)
deploymentObj := GetDeploymentWithEnvVarSources(namespace, deploymentName)
deploymentObj.Annotations = annotations
deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return deployment, err
}

// CreateDaemonSet creates a daemonset in given namespace and returns the DaemonSet
func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string) (*v1beta1.DaemonSet, error) {
func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string, volumeMount bool) (*appsv1.DaemonSet, error) {
logrus.Infof("Creating DaemonSet")
daemonsetClient := client.ExtensionsV1beta1().DaemonSets(namespace)
daemonset, err := daemonsetClient.Create(GetDaemonSet(namespace, daemonsetName))
time.Sleep(10 * time.Second)
daemonsetClient := client.AppsV1().DaemonSets(namespace)
var daemonsetObj *appsv1.DaemonSet
if volumeMount {
daemonsetObj = GetDaemonSet(namespace, daemonsetName)
} else {
daemonsetObj = GetDaemonSetWithEnvVars(namespace, daemonsetName)
}
daemonset, err := daemonsetClient.Create(context.TODO(), daemonsetObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return daemonset, err
}

// CreateStatefulSet creates a statefulset in given namespace and returns the StatefulSet
func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, namespace string) (*v1_beta1.StatefulSet, error) {
func CreateStatefulSet(client kubernetes.Interface, statefulsetName string, namespace string, volumeMount bool) (*appsv1.StatefulSet, error) {
logrus.Infof("Creating StatefulSet")
statefulsetClient := client.AppsV1beta1().StatefulSets(namespace)
statefulset, err := statefulsetClient.Create(GetStatefulSet(namespace, statefulsetName))
time.Sleep(10 * time.Second)
statefulsetClient := client.AppsV1().StatefulSets(namespace)
var statefulsetObj *appsv1.StatefulSet
if volumeMount {
statefulsetObj = GetStatefulSet(namespace, statefulsetName)
} else {
statefulsetObj = GetStatefulSetWithEnvVar(namespace, statefulsetName)
}
statefulset, err := statefulsetClient.Create(context.TODO(), statefulsetObj, metav1.CreateOptions{})
time.Sleep(3 * time.Second)
return statefulset, err
}

// DeleteDeployment deletes a deployment in given namespace and returns the error if any
func DeleteDeployment(client kubernetes.Interface, namespace string, deploymentName string) error {
logrus.Infof("Deleting Deployment")
deploymentError := client.ExtensionsV1beta1().Deployments(namespace).Delete(deploymentName, &metav1.DeleteOptions{})
time.Sleep(10 * time.Second)
deploymentError := client.AppsV1().Deployments(namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return deploymentError
}

// DeleteDeploymentConfig deletes a deploymentConfig in given namespace and returns the error if any
func DeleteDeploymentConfig(client appsclient.Interface, namespace string, deploymentConfigName string) error {
logrus.Infof("Deleting DeploymentConfig")
deploymentConfigError := client.AppsV1().DeploymentConfigs(namespace).Delete(context.TODO(), deploymentConfigName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return deploymentConfigError
}

// DeleteDaemonSet deletes a daemonset in given namespace and returns the error if any
func DeleteDaemonSet(client kubernetes.Interface, namespace string, daemonsetName string) error {
logrus.Infof("Deleting DaemonSet %s", daemonsetName)
daemonsetError := client.ExtensionsV1beta1().DaemonSets(namespace).Delete(daemonsetName, &metav1.DeleteOptions{})
time.Sleep(10 * time.Second)
daemonsetError := client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), daemonsetName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return daemonsetError
}

// DeleteStatefulSet deletes a statefulset in given namespace and returns the error if any
func DeleteStatefulSet(client kubernetes.Interface, namespace string, statefulsetName string) error {
logrus.Infof("Deleting StatefulSet %s", statefulsetName)
statefulsetError := client.AppsV1beta1().StatefulSets(namespace).Delete(statefulsetName, &metav1.DeleteOptions{})
time.Sleep(10 * time.Second)
statefulsetError := client.AppsV1().StatefulSets(namespace).Delete(context.TODO(), statefulsetName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return statefulsetError
}

@@ -329,8 +800,8 @@ func UpdateConfigMap(configmapClient core_v1.ConfigMapInterface, namespace strin
} else {
configmap = GetConfigmap(namespace, configmapName, data)
}
_, updateErr := configmapClient.Update(configmap)
time.Sleep(10 * time.Second)
_, updateErr := configmapClient.Update(context.TODO(), configmap, metav1.UpdateOptions{})
time.Sleep(3 * time.Second)
return updateErr
}

@@ -343,24 +814,24 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secret
} else {
secret = GetSecret(namespace, secretName, data)
}
_, updateErr := secretClient.Update(secret)
time.Sleep(10 * time.Second)
_, updateErr := secretClient.Update(context.TODO(), secret, metav1.UpdateOptions{})
time.Sleep(3 * time.Second)
return updateErr
}

// DeleteConfigMap deletes a configmap in given namespace and returns the error if any
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
logrus.Infof("Deleting configmap %q.\n", configmapName)
err := client.CoreV1().ConfigMaps(namespace).Delete(configmapName, &metav1.DeleteOptions{})
time.Sleep(10 * time.Second)
err := client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configmapName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return err
}

// DeleteSecret deletes a secret in given namespace and returns the error if any
func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
logrus.Infof("Deleting secret %q.\n", secretName)
err := client.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
time.Sleep(10 * time.Second)
err := client.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{})
time.Sleep(3 * time.Second)
return err
}

@@ -374,28 +845,87 @@ func RandSeq(n int) string {
return string(b)
}

func VerifyResourceUpdate(client kubernetes.Interface, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(client, config.Namespace)
// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
containers := upgradeFuncs.ContainersFunc(i)
accessor, err := meta.Accessor(i)
if err != nil {
return false
}
annotations := accessor.GetAnnotations()
// match statefulsets with the correct annotation
annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
if annotationValue != "" {
annotationValue := annotations[config.Annotation]
searchAnnotationValue := annotations[options.AutoSearchAnnotation]
reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
matches := false
if err == nil && reloaderEnabled {
matches = true
} else if annotationValue != "" {
values := strings.Split(annotationValue, ",")
matches := false
for _, value := range values {
value = strings.Trim(value, " ")
if value == config.ResourceName {
matches = true
break
}
}
if matches {
envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(annotationValue) + "_" + envVarPostfix
updated := GetResourceSHA(containers, envName)
} else if searchAnnotationValue == "true" {
if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
matches = true
}
}

if updated == config.SHAValue {
return true
}
if matches {
envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix
updated := GetResourceSHAFromEnvVar(containers, envName)
if updated == config.SHAValue {
return true
}
}
}
return false
}

// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
for _, i := range items {
podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)
accessor, err := meta.Accessor(i)
if err != nil {
return false
}
annotations := accessor.GetAnnotations()
// match statefulsets with the correct annotation
annotationValue := annotations[config.Annotation]
searchAnnotationValue := annotations[options.AutoSearchAnnotation]
reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
matches := false
if err == nil && reloaderEnabled {
matches = true
} else if annotationValue != "" {
values := strings.Split(annotationValue, ",")
for _, value := range values {
value = strings.Trim(value, " ")
if value == config.ResourceName {
matches = true
break
}
}
} else if searchAnnotationValue == "true" {
if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
matches = true
}
}

if matches {
updated := GetResourceSHAFromAnnotation(podAnnotations)
if updated == config.SHAValue {
return true
}
}
}

@@ -1,9 +1,41 @@
package util

import (
"github.com/stakater/Reloader/internal/pkg/constants"
"github.com/stakater/Reloader/internal/pkg/options"
v1 "k8s.io/api/core/v1"
)

//Config contains rolling upgrade configuration parameters
type Config struct {
Namespace string
ResourceName string
Annotation string
SHAValue string
Namespace string
ResourceName string
ResourceAnnotations map[string]string
Annotation string
SHAValue string
Type string
}

// GetConfigmapConfig provides utility config for configmap
func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
return Config{
Namespace: configmap.Namespace,
ResourceName: configmap.Name,
ResourceAnnotations: configmap.Annotations,
Annotation: options.ConfigmapUpdateOnChangeAnnotation,
SHAValue: GetSHAfromConfigmap(configmap),
Type: constants.ConfigmapEnvVarPostfix,
}
}

// GetSecretConfig provides utility config for secret
func GetSecretConfig(secret *v1.Secret) Config {
return Config{
Namespace: secret.Namespace,
ResourceName: secret.Name,
ResourceAnnotations: secret.Annotations,
Annotation: options.SecretUpdateOnChangeAnnotation,
SHAValue: GetSHAfromSecret(secret.Data),
Type: constants.SecretEnvVarPostfix,
}
}

@@ -2,6 +2,7 @@ package util

import (
"reflect"
"strconv"

"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,3 +37,14 @@ func ToObjectMeta(kubernetesObject interface{}) ObjectMeta {
ObjectMeta: field,
}
}

// ParseBool returns result in bool format after parsing
func ParseBool(value interface{}) bool {
if reflect.Bool == reflect.TypeOf(value).Kind() {
return value.(bool)
} else if reflect.String == reflect.TypeOf(value).Kind() {
result, _ := strconv.ParseBool(value.(string))
return result
}
return false
}

internal/pkg/util/reload_source.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package util

import "time"

type ReloadSource struct {
Type string `json:"type"`
Name string `json:"name"`
Namespace string `json:"namespace"`
Hash string `json:"hash"`
ContainerRefs []string `json:"containerRefs"`
ObservedAt int64 `json:"observedAt"`
}

func NewReloadSource(
resourceName string,
resourceNamespace string,
resourceType string,
resourceHash string,
containerRefs []string,
) ReloadSource {
return ReloadSource{
ObservedAt: time.Now().Unix(),
Name: resourceName,
Namespace: resourceNamespace,
Type: resourceType,
Hash: resourceHash,
ContainerRefs: containerRefs,
}
}

func NewReloadSourceFromConfig(config Config, containerRefs []string) ReloadSource {
return NewReloadSource(
config.ResourceName,
config.Namespace,
config.Type,
config.SHAValue,
containerRefs,
)
}
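
// Illustrative sketch (not part of the diff): how a ReloadSource might be serialized
// into the pod-template annotation that GetResourceSHAFromAnnotation reads back.
// The annotation key and container name below are assumptions for the example only.
//
//	source := util.NewReloadSourceFromConfig(config, []string{"app-container"})
//	payload, err := json.Marshal(source)
//	if err == nil {
//		annotations["reloader.stakater.com/last-reloaded-from"] = string(payload)
//	}
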
@@ -2,7 +2,12 @@ package util

import (
"bytes"
"encoding/base64"
"sort"
"strings"

"github.com/stakater/Reloader/internal/pkg/crypto"
v1 "k8s.io/api/core/v1"
)

// ConvertToEnvVarName converts the given text into a usable env var
@@ -25,3 +30,35 @@ func ConvertToEnvVarName(text string) string {
}
return buffer.String()
}

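// GetSHAfromConfigmap generates a SHA over the sorted key=value pairs of the configmap's Data and BinaryData.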
func GetSHAfromConfigmap(configmap *v1.ConfigMap) string {
values := []string{}
for k, v := range configmap.Data {
values = append(values, k+"="+v)
}
for k, v := range configmap.BinaryData {
values = append(values, k+"="+base64.StdEncoding.EncodeToString(v))
}
sort.Strings(values)
return crypto.GenerateSHA(strings.Join(values, ";"))
}

func GetSHAfromSecret(data map[string][]byte) string {
values := []string{}
for k, v := range data {
values = append(values, k+"="+string(v[:]))
}
sort.Strings(values)
return crypto.GenerateSHA(strings.Join(values, ";"))
}

type List []string

func (l *List) Contains(s string) bool {
for _, v := range *l {
if v == s {
return true
}
}
return false
}

@@ -2,6 +2,8 @@ package util

import (
"testing"

v1 "k8s.io/api/core/v1"
)

func TestConvertToEnvVarName(t *testing.T) {
@@ -11,3 +13,35 @@ func TestConvertToEnvVarName(t *testing.T) {
t.Errorf("Failed to convert data into environment variable")
}
}

func TestGetHashFromConfigMap(t *testing.T) {
data := map[*v1.ConfigMap]string{
{
Data: map[string]string{"test": "test"},
}: "Only Data",
{
Data: map[string]string{"test": "test"},
BinaryData: map[string][]byte{"bintest": []byte("test")},
}: "Both Data and BinaryData",
{
BinaryData: map[string][]byte{"bintest": []byte("test")},
}: "Only BinaryData",
}
converted := map[string]string{}
for cm, cmName := range data {
converted[cmName] = GetSHAfromConfigmap(cm)
}

// Test that the hash for each configmap is really unique
for cmName, cmHash := range converted {
count := 0
for _, cmHash2 := range converted {
if cmHash == cmHash2 {
count++
}
}
if count > 1 {
t.Errorf("Found duplicate hashes for %v", cmName)
}
}
}

okteto.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
name: reloader-reloader
image: okteto/golang:1
command: bash
securityContext:
capabilities:
add:
- SYS_PTRACE
volumes:
- /go/pkg/
- /root/.cache/go-build/
sync:
- .:/app
forward:
- 2345:2345
@@ -1,17 +1,102 @@
package kube

import (
"context"
"os"

"k8s.io/client-go/tools/clientcmd"

argorollout "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
appsclient "github.com/openshift/client-go/apps/clientset/versioned"
"github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)

// GetClient gets the client for k8s, if ~/.kube/config exists so get that config else incluster config
func GetClient() (*kubernetes.Clientset, error) {
// Clients struct exposes interfaces for kubernetes as well as openshift if available
type Clients struct {
KubernetesClient kubernetes.Interface
OpenshiftAppsClient appsclient.Interface
ArgoRolloutClient argorollout.Interface
}

var (
// IsOpenshift is true if environment is Openshift, it is false if environment is Kubernetes
IsOpenshift = isOpenshift()
)

// GetClients returns a `Clients` object containing both openshift and kubernetes clients with an openshift identifier
func GetClients() Clients {
client, err := GetKubernetesClient()
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}

var appsClient *appsclient.Clientset

if IsOpenshift {
appsClient, err = GetOpenshiftAppsClient()
if err != nil {
logrus.Warnf("Unable to create Openshift Apps client error = %v", err)
}
}

var rolloutClient *argorollout.Clientset

rolloutClient, err = GetArgoRolloutClient()
if err != nil {
logrus.Warnf("Unable to create ArgoRollout client error = %v", err)
}

return Clients{
KubernetesClient: client,
OpenshiftAppsClient: appsClient,
ArgoRolloutClient: rolloutClient,
}
}

func GetArgoRolloutClient() (*argorollout.Clientset, error) {
config, err := getConfig()
if err != nil {
return nil, err
}
return argorollout.NewForConfig(config)
}

func isOpenshift() bool {
client, err := GetKubernetesClient()
if err != nil {
logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
}
_, err = client.RESTClient().Get().AbsPath("/apis/project.openshift.io").Do(context.TODO()).Raw()
if err == nil {
logrus.Info("Environment: Openshift")
return true
}
logrus.Info("Environment: Kubernetes")
return false
}

// GetOpenshiftAppsClient returns an Openshift Client that can query on Apps
func GetOpenshiftAppsClient() (*appsclient.Clientset, error) {
config, err := getConfig()
if err != nil {
return nil, err
}
return appsclient.NewForConfig(config)
}

// GetKubernetesClient gets the client for k8s; if ~/.kube/config exists it uses that config, otherwise the in-cluster config
func GetKubernetesClient() (*kubernetes.Clientset, error) {
config, err := getConfig()
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(config)
}

func getConfig() (*rest.Config, error) {
var config *rest.Config
var err error
kubeconfigPath := os.Getenv("KUBECONFIG")
if kubeconfigPath == "" {
kubeconfigPath = os.Getenv("HOME") + "/.kube/config"
@@ -28,8 +113,6 @@ func GetClient() (*kubernetes.Clientset, error) {
return nil, err
}
}
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(config)

return config, nil
}

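// Illustrative sketch (not part of the diff): typical use of the Clients bundle,
// assuming the "default" namespace purely for the example.
//
//	clients := kube.GetClients()
//	pods, err := clients.KubernetesClient.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
//	if err != nil {
//		logrus.Errorf("listing pods failed: %v", err)
//	}
//	logrus.Infof("found %d pods", len(pods.Items))
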
@@ -1,7 +1,7 @@
package kube

import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)