mirror of
https://github.com/kubereboot/kured.git
synced 2026-02-14 17:39:49 +00:00
Compare commits
414 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1f7961dc47 | ||
|
|
efedb85f53 | ||
|
|
35b7a2d9c6 | ||
|
|
a387ec5d91 | ||
|
|
530d030285 | ||
|
|
605d025509 | ||
|
|
779a332867 | ||
|
|
c966c3370b | ||
|
|
f03716f31a | ||
|
|
980939a8da | ||
|
|
9afdecf1ec | ||
|
|
34177f9b57 | ||
|
|
9e465f89a0 | ||
|
|
2832224a6a | ||
|
|
1b53693264 | ||
|
|
27cd2d1971 | ||
|
|
9a22be71f2 | ||
|
|
ed77686a5c | ||
|
|
1cbf1933dc | ||
|
|
453b69f199 | ||
|
|
0578488672 | ||
|
|
3824070396 | ||
|
|
5c3b6e5294 | ||
|
|
481d1f6482 | ||
|
|
610c7b7b40 | ||
|
|
18ad93be2e | ||
|
|
bec2c9e788 | ||
|
|
29abf56d46 | ||
|
|
f0abb23bfd | ||
|
|
c08d7bb07b | ||
|
|
eff1008d65 | ||
|
|
eeac3c35c9 | ||
|
|
2244e8f9e6 | ||
|
|
cf1ad232f1 | ||
|
|
4706ecee41 | ||
|
|
ec34835511 | ||
|
|
18335b9b38 | ||
|
|
145d9d2141 | ||
|
|
709f2df695 | ||
|
|
f82a42e41f | ||
|
|
ca75f7df31 | ||
|
|
f611bd11ac | ||
|
|
fac3412405 | ||
|
|
79fc7e1d1f | ||
|
|
70e258aabf | ||
|
|
d788f58e60 | ||
|
|
f738638b99 | ||
|
|
771080a2fd | ||
|
|
82aa45ddef | ||
|
|
dfb6c13e8c | ||
|
|
a9b8c8a721 | ||
|
|
a7fbd55747 | ||
|
|
0454a97dae | ||
|
|
5d893c22dc | ||
|
|
e19a1bb7c3 | ||
|
|
52aa00dd1e | ||
|
|
f773059366 | ||
|
|
d87527800f | ||
|
|
3e025930d6 | ||
|
|
c4a35595c6 | ||
|
|
305c302ebd | ||
|
|
a079b240c7 | ||
|
|
b967d9a129 | ||
|
|
63d068a649 | ||
|
|
24a4891c6b | ||
|
|
e3f469523f | ||
|
|
603ace4ef0 | ||
|
|
6a4c64093c | ||
|
|
65c8b7ef5e | ||
|
|
a3d59e3fd7 | ||
|
|
4eaa5802cb | ||
|
|
7aec517498 | ||
|
|
0ad18678bb | ||
|
|
44a68beb2f | ||
|
|
cbf9c46474 | ||
|
|
d677b436a0 | ||
|
|
8093053275 | ||
|
|
6ab853dd71 | ||
|
|
cb84fad891 | ||
|
|
750d7bc7c8 | ||
|
|
bc474beb04 | ||
|
|
5305da5ce2 | ||
|
|
3caf14d536 | ||
|
|
942c850179 | ||
|
|
10b8968a63 | ||
|
|
c62ceb1124 | ||
|
|
0fcbf9d052 | ||
|
|
2e66c6eb96 | ||
|
|
2bc8b54977 | ||
|
|
579475ab93 | ||
|
|
2de1e28245 | ||
|
|
5845fa2b20 | ||
|
|
191366445d | ||
|
|
e165bcaf4a | ||
|
|
9e1e8787ef | ||
|
|
d03a7cf625 | ||
|
|
2b64f02ee0 | ||
|
|
47fadc3607 | ||
|
|
8dbaac16f5 | ||
|
|
8b2864f60a | ||
|
|
5527b9b2dd | ||
|
|
415e2e77ed | ||
|
|
c1b159cbfc | ||
|
|
4860eee7f0 | ||
|
|
6a525cc116 | ||
|
|
342e5fcc71 | ||
|
|
978f5851cb | ||
|
|
1c9e367931 | ||
|
|
2d80ea51ad | ||
|
|
9e34e572f4 | ||
|
|
200dda6cbe | ||
|
|
e59f0b4d33 | ||
|
|
63ac201bc3 | ||
|
|
d9c4107617 | ||
|
|
5f0542bd38 | ||
|
|
03be903f52 | ||
|
|
d8ed33c398 | ||
|
|
4e43d20e81 | ||
|
|
0dcdb632f2 | ||
|
|
ce4d33b47a | ||
|
|
8db46625c2 | ||
|
|
19046cfe2f | ||
|
|
57733b5a80 | ||
|
|
9f86ff674f | ||
|
|
7a1ee358b2 | ||
|
|
c69c7d85e1 | ||
|
|
469fe03e30 | ||
|
|
70d0452267 | ||
|
|
079ba31ef8 | ||
|
|
4b94d3e867 | ||
|
|
9c44934701 | ||
|
|
37633627bd | ||
|
|
9b9d989ebb | ||
|
|
23ab8b24a8 | ||
|
|
bdb6a2dd27 | ||
|
|
05356a3a12 | ||
|
|
ad5cbd64fc | ||
|
|
69541a8c28 | ||
|
|
96f8d2f981 | ||
|
|
67584fc365 | ||
|
|
1e1d4a3e18 | ||
|
|
0f71c238c2 | ||
|
|
76d761f7dc | ||
|
|
35880ef052 | ||
|
|
2a4cf0a9b5 | ||
|
|
e8c951d536 | ||
|
|
f1a74eefef | ||
|
|
930db0bd96 | ||
|
|
05ac71a782 | ||
|
|
03e01e52c9 | ||
|
|
19ea0d99f0 | ||
|
|
3ca4a06257 | ||
|
|
903d223daa | ||
|
|
45b19f650c | ||
|
|
8b1630c441 | ||
|
|
abd47bfe63 | ||
|
|
dc5076b217 | ||
|
|
f441e5d1cd | ||
|
|
20a35ad8d2 | ||
|
|
990c1093bb | ||
|
|
a789a844e9 | ||
|
|
be6065c7a6 | ||
|
|
13bb79bd96 | ||
|
|
acc8e89504 | ||
|
|
6de3fcaf68 | ||
|
|
18c73209e1 | ||
|
|
895cad9414 | ||
|
|
662b2b7daa | ||
|
|
a68734f82a | ||
|
|
4ebb03a368 | ||
|
|
8635b70bdb | ||
|
|
39f0b77b18 | ||
|
|
8edd8a49d4 | ||
|
|
5a9765cc0f | ||
|
|
6b4bbca160 | ||
|
|
36c6df7d16 | ||
|
|
2038f8a914 | ||
|
|
f030572252 | ||
|
|
a04d848f9c | ||
|
|
44da60c421 | ||
|
|
cb13ff9c8c | ||
|
|
eaceb3df70 | ||
|
|
ad2e501c53 | ||
|
|
3c36826507 | ||
|
|
daebda889b | ||
|
|
391532df84 | ||
|
|
02e36de369 | ||
|
|
140fcf5fbb | ||
|
|
746009d780 | ||
|
|
dbc02cc6ec | ||
|
|
80ce8ff7d6 | ||
|
|
e0e81bcd33 | ||
|
|
941304cf40 | ||
|
|
95990fbfd8 | ||
|
|
b72417259a | ||
|
|
4ff41fa4fc | ||
|
|
0b02795d70 | ||
|
|
50189ebeab | ||
|
|
2bd8d484c7 | ||
|
|
e191397fa4 | ||
|
|
8c2292ae4c | ||
|
|
aa918fba5f | ||
|
|
72e13cc0de | ||
|
|
e60c8b730b | ||
|
|
55357c9c65 | ||
|
|
3e082ef0f1 | ||
|
|
f1d5ddc3e2 | ||
|
|
b6af0a1d5f | ||
|
|
9206ff0d91 | ||
|
|
b2ee3e3cdd | ||
|
|
2a8dcac3d3 | ||
|
|
455b3df0dc | ||
|
|
856c1b6950 | ||
|
|
565fea9d62 | ||
|
|
5397501441 | ||
|
|
5d5310b509 | ||
|
|
2415937578 | ||
|
|
6dce6d44e0 | ||
|
|
d0f9fa4647 | ||
|
|
6527ca3b6e | ||
|
|
b2933fab75 | ||
|
|
248242da5b | ||
|
|
d94087afad | ||
|
|
465a6cceab | ||
|
|
51dc738292 | ||
|
|
36f38825b9 | ||
|
|
de77a0f44c | ||
|
|
628a8ae590 | ||
|
|
dd5a303113 | ||
|
|
72e39575c2 | ||
|
|
ab1673a72a | ||
|
|
c2fbb8f849 | ||
|
|
473beb873c | ||
|
|
b8bdb5e00a | ||
|
|
a9676db6c3 | ||
|
|
9848deb283 | ||
|
|
15b451ee04 | ||
|
|
8b14073176 | ||
|
|
1b5d01ee8c | ||
|
|
b37bf39a74 | ||
|
|
c768c7c8d5 | ||
|
|
5530ab0db1 | ||
|
|
8f9af5c1dd | ||
|
|
31551a2c23 | ||
|
|
91ef335394 | ||
|
|
e5f01ce172 | ||
|
|
9a24d9ddab | ||
|
|
56f2b97045 | ||
|
|
cbb1d5702b | ||
|
|
888964c17a | ||
|
|
83eca94075 | ||
|
|
390fe1e742 | ||
|
|
785a8efdf4 | ||
|
|
5275bbd5a9 | ||
|
|
95e6055522 | ||
|
|
a5b3faaa05 | ||
|
|
3da7d5b8f4 | ||
|
|
ec0ba4f1bd | ||
|
|
3adeb5a384 | ||
|
|
9b13117fd4 | ||
|
|
e370b0bd4a | ||
|
|
659e9fd5bf | ||
|
|
94e73465ad | ||
|
|
f81a302fa5 | ||
|
|
f20a1ddd05 | ||
|
|
7c3184239a | ||
|
|
9fbd0a2cc8 | ||
|
|
738564296a | ||
|
|
b47d43f268 | ||
|
|
9ac37661d2 | ||
|
|
fc8d979da4 | ||
|
|
030ff4525e | ||
|
|
c62e67b27a | ||
|
|
2a2ee20b32 | ||
|
|
e6c06078ff | ||
|
|
e110d575f0 | ||
|
|
f680091f02 | ||
|
|
edb7460928 | ||
|
|
62f22e7060 | ||
|
|
eba33a7149 | ||
|
|
d29cc2ae68 | ||
|
|
11acd1a86d | ||
|
|
3a116be09d | ||
|
|
e750f07de8 | ||
|
|
c77090d5fd | ||
|
|
f559a95304 | ||
|
|
73f00ce445 | ||
|
|
626db87158 | ||
|
|
67df0e935a | ||
|
|
231888e58a | ||
|
|
d8b9e31ac9 | ||
|
|
104a745305 | ||
|
|
aae5bb6ebb | ||
|
|
a8132a2286 | ||
|
|
42c4b8bc53 | ||
|
|
3895a2f6d3 | ||
|
|
f43ed1484e | ||
|
|
969926dfc3 | ||
|
|
36e6c8b4d8 | ||
|
|
00d8a524ab | ||
|
|
eeedf203c3 | ||
|
|
574065ff8a | ||
|
|
3bfdd76f29 | ||
|
|
f34864758e | ||
|
|
7239c5460c | ||
|
|
6b7d9be99f | ||
|
|
2eec401435 | ||
|
|
a1f3d1eba9 | ||
|
|
d81b2fd93b | ||
|
|
9592fbc94f | ||
|
|
199103498b | ||
|
|
f04f465cad | ||
|
|
575fd245ae | ||
|
|
608abc6e89 | ||
|
|
804ff87592 | ||
|
|
9ed8d412ac | ||
|
|
5615e1e3d2 | ||
|
|
1ce0d36b64 | ||
|
|
3ff79eb20d | ||
|
|
98dfe109ce | ||
|
|
f986887214 | ||
|
|
18c3c06b6e | ||
|
|
fc9a5c75e3 | ||
|
|
719d241e30 | ||
|
|
204b094554 | ||
|
|
4451747a83 | ||
|
|
cec0881290 | ||
|
|
5c71880f32 | ||
|
|
fdac3b1fe7 | ||
|
|
a02ae67559 | ||
|
|
5536bf7e30 | ||
|
|
29b4af1ab7 | ||
|
|
a82b11f1c2 | ||
|
|
679cdc40b9 | ||
|
|
efbd514af8 | ||
|
|
54d356c420 | ||
|
|
ee18dbf482 | ||
|
|
2d52f00bfe | ||
|
|
35a7b19d4d | ||
|
|
7efa076f4f | ||
|
|
6547ce1f08 | ||
|
|
a6a5f76210 | ||
|
|
685a8c66be | ||
|
|
3f39fd0943 | ||
|
|
9a33096e73 | ||
|
|
d51c1fb683 | ||
|
|
58091f6145 | ||
|
|
b21644b11b | ||
|
|
8bb145700c | ||
|
|
d9478cb614 | ||
|
|
2d6f6f811b | ||
|
|
66cfca4a60 | ||
|
|
ce8e96290c | ||
|
|
5f88ff3005 | ||
|
|
b08498318e | ||
|
|
7a3879d030 | ||
|
|
fb292a68dc | ||
|
|
19a31c4c3e | ||
|
|
3f69a3c3cb | ||
|
|
7cbf797407 | ||
|
|
fcc29c3a88 | ||
|
|
ddff18e5bf | ||
|
|
129e68f9fa | ||
|
|
1e648cabcd | ||
|
|
396c688041 | ||
|
|
601e89a4d8 | ||
|
|
1ccc4aad1d | ||
|
|
1bfc6ed242 | ||
|
|
6e46f79f14 | ||
|
|
dffe47a22e | ||
|
|
6933fb0ca6 | ||
|
|
4d37b6544c | ||
|
|
ab4e2b0112 | ||
|
|
836d23005e | ||
|
|
0103f35b50 | ||
|
|
496423095a | ||
|
|
11b61b5fe6 | ||
|
|
a02ae8696b | ||
|
|
b9f7045bc6 | ||
|
|
fdc252e475 | ||
|
|
ab56a0c0ce | ||
|
|
b526600ec6 | ||
|
|
6b7161aa92 | ||
|
|
f077413aff | ||
|
|
c4a1e9893b | ||
|
|
1a8718096b | ||
|
|
23a0fcb912 | ||
|
|
9e5e0bb930 | ||
|
|
28f878cf7c | ||
|
|
2efd823e13 | ||
|
|
bc2867f283 | ||
|
|
e3ade9d053 | ||
|
|
0ad395a9f1 | ||
|
|
cdc6e68ae1 | ||
|
|
d9216e9baf | ||
|
|
d0bdc115a7 | ||
|
|
6f5d6cb1f9 | ||
|
|
ebb7ccf96d | ||
|
|
dccf0856c7 | ||
|
|
b37fd26062 | ||
|
|
221d5d222a | ||
|
|
e71df6a94e | ||
|
|
c50793933f | ||
|
|
c99930d4d4 | ||
|
|
39c353fb8f | ||
|
|
3fd1b0d32a | ||
|
|
83fcc8f28f | ||
|
|
cbfafbb6f4 | ||
|
|
238423969c | ||
|
|
ea7d9d83f1 | ||
|
|
1c540d94de | ||
|
|
1151d324fa | ||
|
|
a8cc821de5 | ||
|
|
b057ed9eba |
13
.github/kind-cluster-1.27.yaml
vendored
13
.github/kind-cluster-1.27.yaml
vendored
@@ -1,13 +0,0 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.27.3"
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.27.3"
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.27.3"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.27.3"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.27.3"
|
||||
13
.github/kind-cluster-1.28.yaml
vendored
13
.github/kind-cluster-1.28.yaml
vendored
@@ -1,13 +0,0 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.28.0"
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.28.0"
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.28.0"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.28.0"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.28.0"
|
||||
13
.github/kind-cluster-1.29.yaml
vendored
13
.github/kind-cluster-1.29.yaml
vendored
@@ -1,13 +0,0 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.29.0"
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.29.0"
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.29.0"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.29.0"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.29.0"
|
||||
9
.github/kind-cluster-current.yaml
vendored
Normal file
9
.github/kind-cluster-current.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.34.0"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.34.0"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.34.0"
|
||||
9
.github/kind-cluster-next.yaml
vendored
Normal file
9
.github/kind-cluster-next.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.35.0"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.35.0"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.35.0"
|
||||
9
.github/kind-cluster-previous.yaml
vendored
Normal file
9
.github/kind-cluster-previous.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.33.4"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.33.4"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.33.4"
|
||||
30
.github/scripts/goreleaser-install.sh
vendored
30
.github/scripts/goreleaser-install.sh
vendored
@@ -1,30 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
RELEASES_URL="https://github.com/goreleaser/goreleaser/releases"
|
||||
FILE_BASENAME="goreleaser"
|
||||
|
||||
test -z "$VERSION" && {
|
||||
echo "Unable to get goreleaser version." >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
test -z "$TMPDIR" && TMPDIR="$(mktemp -d)"
|
||||
# goreleaser uses arm64 instead of aarch64
|
||||
goreleaser_arch=$(uname -m | sed -e 's/aarch64/arm64/g' -e 's/ppc64le/ppc64/' -e 's/armv7l/armv7/' )
|
||||
TAR_FILE="$TMPDIR/${FILE_BASENAME}_$(uname -s)_${goreleaser_arch}.tar.gz"
|
||||
export TAR_FILE
|
||||
|
||||
(
|
||||
echo "Downloading GoReleaser $VERSION..."
|
||||
curl -sfLo "$TAR_FILE" \
|
||||
"$RELEASES_URL/download/$VERSION/${FILE_BASENAME}_$(uname -s)_${goreleaser_arch}.tar.gz"
|
||||
cd "$TMPDIR"
|
||||
curl -sfLo "checksums.txt" "$RELEASES_URL/download/$VERSION/checksums.txt"
|
||||
echo "Verifying checksums..."
|
||||
sha256sum --ignore-missing --quiet --check checksums.txt
|
||||
)
|
||||
|
||||
tar -xf "$TAR_FILE" -O goreleaser > "$TMPDIR/goreleaser"
|
||||
rm "$TMPDIR/checksums.txt"
|
||||
rm "$TAR_FILE"
|
||||
18
.github/workflows/codeql.yml
vendored
18
.github/workflows/codeql.yml
vendored
@@ -19,7 +19,10 @@ on:
|
||||
# The branches below must be a subset of the branches above
|
||||
branches: [ "main" ]
|
||||
schedule:
|
||||
- cron: '24 13 * * 3'
|
||||
- cron: '24 13 * * 6'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
@@ -38,12 +41,17 @@ jobs:
|
||||
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v3
|
||||
uses: github/codeql-action/init@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||
@@ -57,7 +65,7 @@ jobs:
|
||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||
# If this step fails, then you should remove it and run the build manually (see below)
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v3
|
||||
uses: github/codeql-action/autobuild@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6
|
||||
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
|
||||
@@ -70,6 +78,6 @@ jobs:
|
||||
# ./location_of_script_within_repo/buildscript.sh
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v3
|
||||
uses: github/codeql-action/analyze@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
|
||||
28
.github/workflows/dependency-review.yml
vendored
Normal file
28
.github/workflows/dependency-review.yml
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
# Dependency Review Action
|
||||
#
|
||||
# This Action will scan dependency manifest files that change as part of a Pull Request,
|
||||
# surfacing known-vulnerable versions of the packages declared or updated in the PR.
|
||||
# Once installed, if the workflow run is marked as required,
|
||||
# PRs introducing known-vulnerable packages will be blocked from merging.
|
||||
#
|
||||
# Source repository: https://github.com/actions/dependency-review-action
|
||||
name: 'Dependency Review'
|
||||
on: [pull_request]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
dependency-review:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: 'Checkout Repository'
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: 'Dependency Review'
|
||||
uses: actions/dependency-review-action@40c09b7dc99638e5ddb0bfd91c1673effc064d8a # v4.8.1
|
||||
41
.github/workflows/on-main-push.yaml
vendored
41
.github/workflows/on-main-push.yaml
vendored
@@ -10,6 +10,9 @@ env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
tag-scan-and-push-final-image:
|
||||
name: "Build, scan, and publish tagged image"
|
||||
@@ -19,16 +22,25 @@ jobs:
|
||||
contents: write
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
|
||||
with:
|
||||
version: 2025.10.5
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
@@ -36,30 +48,27 @@ jobs:
|
||||
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@dbef88086f6cef02e264edb7dbf63250c17cef6c
|
||||
uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Find current tag version
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
|
||||
- name: Build binaries
|
||||
run: make kured-release-snapshot
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build image
|
||||
uses: docker/build-push-action@v5
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/arm64, linux/amd64, linux/arm/v7, linux/arm/v6, linux/386
|
||||
@@ -70,13 +79,11 @@ jobs:
|
||||
|
||||
- name: Generate SBOM
|
||||
run: |
|
||||
.tmp/syft ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }} -o spdx > kured.sbom
|
||||
syft ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }} -o spdx > kured.sbom
|
||||
|
||||
- name: Sign and attest artifacts
|
||||
run: |
|
||||
.tmp/cosign sign -y -r ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
|
||||
|
||||
.tmp/cosign sign-blob -y --output-signature kured.sbom.sig --output-certificate kured.sbom.pem kured.sbom
|
||||
|
||||
.tmp/cosign attest -y --type spdx --predicate kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
|
||||
.tmp/cosign attach sbom --type spdx --sbom kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
|
||||
cosign sign -y -r ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
|
||||
cosign sign-blob -y --output-signature kured.sbom.sig --output-certificate kured.sbom.pem kured.sbom
|
||||
cosign attest -y --type spdx --predicate kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
|
||||
cosign attach sbom --type spdx --sbom kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
|
||||
|
||||
26
.github/workflows/on-pr-docs.yaml
vendored
Normal file
26
.github/workflows/on-pr-docs.yaml
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
name: Verify Docs Links
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
paths:
|
||||
- '**.md'
|
||||
|
||||
jobs:
|
||||
pr-check-docs-links:
|
||||
name: Check docs for incorrect links
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Link Checker
|
||||
uses: lycheeverse/lychee-action@a8c4c7cb88f0c7386610c35eb25108e448569cb0 # v2.7.0
|
||||
env:
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||
with:
|
||||
args: --verbose --no-progress '*.md' '*.yaml' '*/*/*.go' --exclude-link-local
|
||||
fail: true
|
||||
366
.github/workflows/on-pr.yaml
vendored
366
.github/workflows/on-pr.yaml
vendored
@@ -4,91 +4,76 @@ on:
|
||||
push:
|
||||
|
||||
jobs:
|
||||
pr-gotest:
|
||||
name: Run go tests
|
||||
pr-short-tests:
|
||||
name: Run short go tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
|
||||
with:
|
||||
version: 2025.10.5
|
||||
|
||||
- name: run tests
|
||||
run: go test -json ./... > test.json
|
||||
run: make test
|
||||
|
||||
- name: Annotate tests
|
||||
if: always()
|
||||
uses: guyarb/golang-test-annoations@v0.7.0
|
||||
uses: guyarb/golang-test-annoations@2941118d7ef622b1b3771d1ff6eae9e90659eb26 # v0.8.0
|
||||
with:
|
||||
test-results: test.json
|
||||
|
||||
pr-shellcheck:
|
||||
name: Lint bash code with shellcheck
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Run ShellCheck
|
||||
uses: bewuethr/shellcheck-action@v2
|
||||
|
||||
pr-lint-code:
|
||||
name: Lint golang code
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
- name: Lint cmd folder
|
||||
uses: Jerome1337/golint-action@v1.0.3
|
||||
with:
|
||||
golint-path: './cmd/...'
|
||||
- name: Lint pkg folder
|
||||
uses: Jerome1337/golint-action@v1.0.3
|
||||
with:
|
||||
golint-path: './pkg/...'
|
||||
|
||||
pr-check-docs-links:
|
||||
name: Check docs for incorrect links
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Link Checker
|
||||
uses: lycheeverse/lychee-action@c3089c702fbb949e3f7a8122be0c33c017904f9b
|
||||
env:
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||
with:
|
||||
args: --verbose --no-progress '*.md' '*.yaml' '*/*/*.go' --exclude-link-local
|
||||
fail: true
|
||||
|
||||
# This should not be made a mandatory test
|
||||
# It is only used to make us aware of any potential security failure, that
|
||||
# It is only used to make us aware of any potential security failure that
|
||||
# should trigger a bump of the image in build/.
|
||||
pr-vuln-scan:
|
||||
name: Build image and scan it against known vulnerabilities
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
|
||||
with:
|
||||
version: 2025.10.5
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Find current tag version
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
|
||||
- name: Build image
|
||||
run: VERSION="${{ steps.tags.outputs.sha_short }}" make image
|
||||
run: VERSION="${{ steps.tags.outputs.sha_short }}" DH_ORG="${{ github.repository_owner }}" make image
|
||||
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca
|
||||
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
|
||||
with:
|
||||
image-ref: 'ghcr.io/${{ github.repository }}:${{ steps.tags.outputs.sha_short }}'
|
||||
format: 'table'
|
||||
@@ -104,251 +89,90 @@ jobs:
|
||||
# (compared to helm charts, manifests cannot easily template changes based on versions)
|
||||
# Helm charts are _trailing_ releases, while manifests are done during development.
|
||||
# This test uses the "command" reboot-method.
|
||||
e2e-manifests-command:
|
||||
name: End-to-End test with kured with code and manifests from HEAD (command)
|
||||
e2e-manifests:
|
||||
name: End-to-End test with kured with code and manifests from HEAD
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
kubernetes:
|
||||
- "1.27"
|
||||
- "1.28"
|
||||
- "1.29"
|
||||
testname:
|
||||
- "TestE2EWithCommand"
|
||||
- "TestE2EWithSignal"
|
||||
- "TestE2EConcurrentWithCommand"
|
||||
- "TestE2EConcurrentWithSignal"
|
||||
kubernetes_version:
|
||||
- "previous"
|
||||
- "current"
|
||||
- "next"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
|
||||
with:
|
||||
version: 2025.10.5
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Find current tag version
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
- name: Build artifacts
|
||||
run: |
|
||||
VERSION="${{ steps.tags.outputs.sha_short }}" make image
|
||||
VERSION="${{ steps.tags.outputs.sha_short }}" make manifest
|
||||
|
||||
- name: Workaround "Failed to attach 1 to compat systemd cgroup /actions_job/..." on gh actions
|
||||
run: |
|
||||
sudo bash << EOF
|
||||
cp /etc/docker/daemon.json /etc/docker/daemon.json.old
|
||||
echo '{}' > /etc/docker/daemon.json
|
||||
systemctl restart docker || journalctl --no-pager -n 500
|
||||
systemctl status docker
|
||||
EOF
|
||||
|
||||
# Default name for helm/kind-action kind clusters is "chart-testing"
|
||||
- name: Create kind cluster with 5 nodes
|
||||
uses: helm/kind-action@v1.8.0
|
||||
with:
|
||||
config: .github/kind-cluster-${{ matrix.kubernetes }}.yaml
|
||||
version: v0.14.0
|
||||
|
||||
- name: Preload previously built images onto kind cluster
|
||||
run: kind load docker-image ghcr.io/${{ github.repository }}:${{ steps.tags.outputs.sha_short }} --name chart-testing
|
||||
|
||||
- name: Do not wait for an hour before detecting the rebootSentinel
|
||||
run: |
|
||||
sed -i 's/#\(.*\)--period=1h/\1--period=30s/g' kured-ds.yaml
|
||||
|
||||
- name: Install kured with kubectl
|
||||
run: |
|
||||
kubectl apply -f kured-rbac.yaml && kubectl apply -f kured-ds.yaml
|
||||
|
||||
- name: Ensure kured is ready
|
||||
uses: nick-invision/retry@v2.9.0
|
||||
with:
|
||||
timeout_minutes: 10
|
||||
max_attempts: 10
|
||||
retry_wait_seconds: 60
|
||||
# DESIRED CURRENT READY UP-TO-DATE AVAILABLE should all be = to cluster_size
|
||||
command: "kubectl get ds -n kube-system kured | grep -E 'kured.*5.*5.*5.*5.*5'"
|
||||
|
||||
- name: Create reboot sentinel files
|
||||
run: |
|
||||
./tests/kind/create-reboot-sentinels.sh
|
||||
|
||||
- name: Follow reboot until success
|
||||
env:
|
||||
DEBUG: true
|
||||
run: |
|
||||
./tests/kind/follow-coordinated-reboot.sh
|
||||
- name: Run specific e2e tests
|
||||
run: make e2e-test ARGS="-run ^${{ matrix.testname }}/${{ matrix.kubernetes_version }}"
|
||||
|
||||
|
||||
# This ensures the latest code works with the manifests built from tree.
|
||||
# It is useful for two things:
|
||||
# - Test manifests changes (obviously), ensuring they don't break existing clusters
|
||||
# - Ensure manifests work with the latest versions even with no manifest change
|
||||
# (compared to helm charts, manifests cannot easily template changes based on versions)
|
||||
# Helm charts are _trailing_ releases, while manifests are done during development.
|
||||
# This test uses the "signal" reboot-method.
|
||||
e2e-manifests-signal:
|
||||
name: End-to-End test with kured with code and manifests from HEAD (signal)
|
||||
e2e-tests-singleversion:
|
||||
name: End-to-End test targetting a single version of kubernetes
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
kubernetes:
|
||||
- "1.27"
|
||||
- "1.28"
|
||||
- "1.29"
|
||||
testname:
|
||||
- "TestCordonningIsKept/concurrency1"
|
||||
- "TestCordonningIsKept/concurrency2"
|
||||
- "TestE2EBlocker/podblocker"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
|
||||
with:
|
||||
version: 2025.10.5
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Find current tag version
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
- name: Build artifacts
|
||||
run: |
|
||||
VERSION="${{ steps.tags.outputs.sha_short }}" make image
|
||||
VERSION="${{ steps.tags.outputs.sha_short }}" make manifest
|
||||
|
||||
- name: Workaround "Failed to attach 1 to compat systemd cgroup /actions_job/..." on gh actions
|
||||
run: |
|
||||
sudo bash << EOF
|
||||
cp /etc/docker/daemon.json /etc/docker/daemon.json.old
|
||||
echo '{}' > /etc/docker/daemon.json
|
||||
systemctl restart docker || journalctl --no-pager -n 500
|
||||
systemctl status docker
|
||||
EOF
|
||||
|
||||
# Default name for helm/kind-action kind clusters is "chart-testing"
|
||||
- name: Create kind cluster with 5 nodes
|
||||
uses: helm/kind-action@v1.8.0
|
||||
with:
|
||||
config: .github/kind-cluster-${{ matrix.kubernetes }}.yaml
|
||||
version: v0.14.0
|
||||
|
||||
- name: Preload previously built images onto kind cluster
|
||||
run: kind load docker-image ghcr.io/${{ github.repository }}:${{ steps.tags.outputs.sha_short }} --name chart-testing
|
||||
|
||||
- name: Do not wait for an hour before detecting the rebootSentinel
|
||||
run: |
|
||||
sed -i 's/#\(.*\)--period=1h/\1--period=30s/g' kured-ds-signal.yaml
|
||||
|
||||
- name: Install kured with kubectl
|
||||
run: |
|
||||
kubectl apply -f kured-rbac.yaml && kubectl apply -f kured-ds-signal.yaml
|
||||
|
||||
- name: Ensure kured is ready
|
||||
uses: nick-invision/retry@v2.9.0
|
||||
with:
|
||||
timeout_minutes: 10
|
||||
max_attempts: 10
|
||||
retry_wait_seconds: 60
|
||||
# DESIRED CURRENT READY UP-TO-DATE AVAILABLE should all be = to cluster_size
|
||||
command: "kubectl get ds -n kube-system kured | grep -E 'kured.*5.*5.*5.*5.*5'"
|
||||
|
||||
- name: Create reboot sentinel files
|
||||
run: |
|
||||
./tests/kind/create-reboot-sentinels.sh
|
||||
|
||||
- name: Follow reboot until success
|
||||
env:
|
||||
DEBUG: true
|
||||
run: |
|
||||
./tests/kind/follow-coordinated-reboot.sh
|
||||
|
||||
|
||||
|
||||
# This ensures the latest code works with the manifests built from tree.
|
||||
# It is useful for two things:
|
||||
# - Test manifests changes (obviously), ensuring they don't break existing clusters
|
||||
# - Ensure manifests work with the latest versions even with no manifest change
|
||||
# (compared to helm charts, manifests cannot easily template changes based on versions)
|
||||
# Helm charts are _trailing_ releases, while manifests are done during development.
|
||||
# Concurrency = 2
|
||||
e2e-manifests-concurent:
|
||||
name: End-to-End test with kured with code and manifests from HEAD (concurrent)
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
kubernetes:
|
||||
- "1.27"
|
||||
- "1.28"
|
||||
- "1.29"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
- name: Find current tag version
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
- name: Build artifacts
|
||||
run: |
|
||||
VERSION="${{ steps.tags.outputs.sha_short }}" make image
|
||||
VERSION="${{ steps.tags.outputs.sha_short }}" make manifest
|
||||
|
||||
- name: Workaround "Failed to attach 1 to compat systemd cgroup /actions_job/..." on gh actions
|
||||
run: |
|
||||
sudo bash << EOF
|
||||
cp /etc/docker/daemon.json /etc/docker/daemon.json.old
|
||||
echo '{}' > /etc/docker/daemon.json
|
||||
systemctl restart docker || journalctl --no-pager -n 500
|
||||
systemctl status docker
|
||||
EOF
|
||||
|
||||
# Default name for helm/kind-action kind clusters is "chart-testing"
|
||||
- name: Create kind cluster with 5 nodes
|
||||
uses: helm/kind-action@v1.8.0
|
||||
with:
|
||||
config: .github/kind-cluster-${{ matrix.kubernetes }}.yaml
|
||||
version: v0.14.0
|
||||
|
||||
- name: Preload previously built images onto kind cluster
|
||||
run: kind load docker-image ghcr.io/${{ github.repository }}:${{ steps.tags.outputs.sha_short }} --name chart-testing
|
||||
|
||||
- name: Do not wait for an hour before detecting the rebootSentinel
|
||||
run: |
|
||||
sed -i 's/#\(.*\)--period=1h/\1--period=30s/g' kured-ds.yaml
|
||||
sed -i 's/#\(.*\)--concurrency=1/\1--concurrency=2/g' kured-ds.yaml
|
||||
|
||||
- name: Install kured with kubectl
|
||||
run: |
|
||||
kubectl apply -f kured-rbac.yaml && kubectl apply -f kured-ds.yaml
|
||||
|
||||
- name: Ensure kured is ready
|
||||
uses: nick-invision/retry@v2.9.0
|
||||
with:
|
||||
timeout_minutes: 10
|
||||
max_attempts: 10
|
||||
retry_wait_seconds: 60
|
||||
# DESIRED CURRENT READY UP-TO-DATE AVAILABLE should all be = to cluster_size
|
||||
command: "kubectl get ds -n kube-system kured | grep -E 'kured.*5.*5.*5.*5.*5'"
|
||||
|
||||
- name: Create reboot sentinel files
|
||||
run: |
|
||||
./tests/kind/create-reboot-sentinels.sh
|
||||
|
||||
- name: Follow reboot until success
|
||||
env:
|
||||
DEBUG: true
|
||||
run: |
|
||||
./tests/kind/follow-coordinated-reboot.sh
|
||||
- name: Run specific e2e tests
|
||||
run: make e2e-test ARGS="-run ^${{ matrix.testname }}"
|
||||
|
||||
51
.github/workflows/on-tag.yaml
vendored
51
.github/workflows/on-tag.yaml
vendored
@@ -12,6 +12,9 @@ env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
tag-scan-and-push-final-image:
|
||||
name: "Build, scan, and publish tagged image"
|
||||
@@ -21,37 +24,49 @@ jobs:
|
||||
contents: write
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
|
||||
with:
|
||||
version: 2025.10.5
|
||||
|
||||
- name: Find current tag version
|
||||
run: echo "version=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Build binaries
|
||||
run: make kured-release-tag
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build single image for scan
|
||||
uses: docker/build-push-action@v5
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/amd64
|
||||
push: false
|
||||
load: true
|
||||
tags: |
|
||||
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca
|
||||
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
|
||||
with:
|
||||
image-ref: '${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}'
|
||||
format: 'table'
|
||||
@@ -61,7 +76,7 @@ jobs:
|
||||
severity: 'CRITICAL,HIGH'
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
@@ -69,12 +84,12 @@ jobs:
|
||||
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@dbef88086f6cef02e264edb7dbf63250c17cef6c
|
||||
uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
|
||||
- name: Build release images
|
||||
uses: docker/build-push-action@v5
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/arm64, linux/amd64, linux/arm/v7, linux/arm/v6, linux/386
|
||||
@@ -85,13 +100,11 @@ jobs:
|
||||
|
||||
- name: Generate SBOM
|
||||
run: |
|
||||
.tmp/syft ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }} -o spdx > kured.sbom
|
||||
syft ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }} -o spdx > kured.sbom
|
||||
|
||||
- name: Sign and attest artifacts
|
||||
run: |
|
||||
.tmp/cosign sign -y -r ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
|
||||
.tmp/cosign sign-blob -y --output-signature kured.sbom.sig kured.sbom
|
||||
|
||||
.tmp/cosign attest -y --type spdx --predicate kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
.tmp/cosign attach sbom --type spdx --sbom kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
cosign sign -y -r ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
cosign sign-blob -y --output-signature kured.sbom.sig kured.sbom
|
||||
cosign attest -y --type spdx --predicate kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
cosign attach sbom --type spdx --sbom kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
|
||||
@@ -2,20 +2,32 @@ name: Daily jobs
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "30 1 * * *"
|
||||
- cron: "30 1 * * 6"
|
||||
|
||||
jobs:
|
||||
periodics-gotest:
|
||||
name: Run go tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
||||
- name: checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
|
||||
with:
|
||||
version: 2025.10.5
|
||||
|
||||
- name: run tests
|
||||
run: go test -json ./... > test.json
|
||||
run: make test
|
||||
|
||||
- name: Annotate tests
|
||||
if: always()
|
||||
uses: guyarb/golang-test-annoations@v0.7.0
|
||||
uses: guyarb/golang-test-annoations@2941118d7ef622b1b3771d1ff6eae9e90659eb26 # v0.8.0
|
||||
with:
|
||||
test-results: test.json
|
||||
|
||||
@@ -23,9 +35,14 @@ jobs:
|
||||
name: Mark stale issues and PRs
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# Stale by default waits for 60 days before marking PR/issues as stale, and closes them after 21 days.
|
||||
# Stale, by default, waits for 60 days before marking PR/issues as stale and closes them after 21 days.
|
||||
# Do not expire the first issues that would allow the community to grow.
|
||||
- uses: actions/stale@v9
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-issue-message: 'This issue was automatically considered stale due to lack of activity. Please update it and/or join our slack channels to promote it, before it automatically closes (in 7 days).'
|
||||
@@ -33,15 +50,22 @@ jobs:
|
||||
stale-issue-label: 'no-issue-activity'
|
||||
stale-pr-label: 'no-pr-activity'
|
||||
exempt-issue-labels: 'good first issue,keep'
|
||||
exempt-pr-labels: 'keep'
|
||||
days-before-close: 21
|
||||
|
||||
check-docs-links:
|
||||
name: Check docs for incorrect links
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Link Checker
|
||||
uses: lycheeverse/lychee-action@c3089c702fbb949e3f7a8122be0c33c017904f9b
|
||||
uses: lycheeverse/lychee-action@a8c4c7cb88f0c7386610c35eb25108e448569cb0
|
||||
env:
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||
with:
|
||||
@@ -52,25 +76,37 @@ jobs:
|
||||
name: Build image and scan it against known vulnerabilities
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
|
||||
with:
|
||||
version: 2025.10.5
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- name: Find current tag version
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
|
||||
- name: Build artifacts
|
||||
run: VERSION="${{ steps.tags.outputs.sha_short }}" make image
|
||||
run: VERSION="${{ steps.tags.outputs.sha_short }}" DH_ORG="${{ github.repository_owner }}" make image
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca
|
||||
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
|
||||
with:
|
||||
image-ref: 'ghcr.io/${{ github.repository }}:${{ steps.tags.outputs.sha_short }}'
|
||||
format: 'table'
|
||||
78
.github/workflows/scorecard.yml
vendored
Normal file
78
.github/workflows/scorecard.yml
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
# This workflow uses actions that are not certified by GitHub. They are provided
|
||||
# by a third-party and are governed by separate terms of service, privacy
|
||||
# policy, and support documentation.
|
||||
|
||||
name: Scorecard supply-chain security
|
||||
on:
|
||||
# For Branch-Protection check. Only the default branch is supported. See
|
||||
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
|
||||
branch_protection_rule:
|
||||
# To guarantee Maintained check is occasionally updated. See
|
||||
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
|
||||
schedule:
|
||||
- cron: '34 3 * * 6'
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
|
||||
# Declare default permissions as read only.
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
analysis:
|
||||
name: Scorecard analysis
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
# Needed to upload the results to code-scanning dashboard.
|
||||
security-events: write
|
||||
# Needed to publish results and get a badge (see publish_results below).
|
||||
id-token: write
|
||||
# Uncomment the permissions below if installing in a private repository.
|
||||
# contents: read
|
||||
# actions: read
|
||||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: "Checkout code"
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: "Run analysis"
|
||||
uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
# (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
|
||||
# - you want to enable the Branch-Protection check on a *public* repository, or
|
||||
# - you are installing Scorecard on a *private* repository
|
||||
# To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
|
||||
# repo_token: ${{ secrets.SCORECARD_TOKEN }}
|
||||
|
||||
# Public repositories:
|
||||
# - Publish results to OpenSSF REST API for easy access by consumers
|
||||
# - Allows the repository to include the Scorecard badge.
|
||||
# - See https://github.com/ossf/scorecard-action#publishing-results.
|
||||
# For private repositories:
|
||||
# - `publish_results` will always be set to `false`, regardless
|
||||
# of the value entered here.
|
||||
publish_results: true
|
||||
|
||||
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
||||
# format to the repository Actions tab.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v3.pre.node20
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
retention-days: 5
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard (optional).
|
||||
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6
|
||||
with:
|
||||
sarif_file: results.sarif
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -2,4 +2,5 @@ cmd/kured/kured
|
||||
vendor
|
||||
build
|
||||
dist
|
||||
.tmp
|
||||
test.json
|
||||
tests/kind/testfiles/*.yaml
|
||||
|
||||
37
.golangci.yml
Normal file
37
.golangci.yml
Normal file
@@ -0,0 +1,37 @@
|
||||
version: "2"
|
||||
#timeout : 5m we can add this if needed
|
||||
modules-download-mode: readonly
|
||||
run:
|
||||
tests: false
|
||||
linters:
|
||||
enable:
|
||||
- govet
|
||||
- staticcheck
|
||||
- unused
|
||||
- contextcheck
|
||||
- goconst
|
||||
- gosec
|
||||
- testifylint
|
||||
- errcheck
|
||||
- revive
|
||||
|
||||
linters-settings:
|
||||
errcheck:
|
||||
check-type-assertions: true
|
||||
check-blank: true
|
||||
revive:
|
||||
severity: warning
|
||||
confidence: 0.8
|
||||
rules:
|
||||
- name: indent-error-flow
|
||||
- name: var-naming
|
||||
- name: import-shadowing
|
||||
# https://github.com/mgechev/revive/blob/HEAD/RULES_DESCRIPTIONS.md#package-comments
|
||||
- name: package-comments # This is not working!
|
||||
disabled: true
|
||||
output:
|
||||
format: colored-line-number
|
||||
print-issued-lines: true
|
||||
print-linter-name: true
|
||||
uniq-by-line: false
|
||||
sort-results: true
|
||||
8
.mise/config.toml
Normal file
8
.mise/config.toml
Normal file
@@ -0,0 +1,8 @@
|
||||
[tools]
|
||||
cosign = "2.2.3"
|
||||
golangci-lint = "2.1.6"
|
||||
goreleaser = "1.24.0"
|
||||
kind = "0.31.0"
|
||||
kubectl = "1.35.0"
|
||||
shellcheck = "0.11.0"
|
||||
syft = "1.0.1"
|
||||
0
.trivyignore
Normal file
0
.trivyignore
Normal file
@@ -1,3 +1,3 @@
|
||||
## Kured Community Code of Conduct
|
||||
# Kured Community Code of Conduct
|
||||
|
||||
Kured follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
|
||||
|
||||
324
CONTRIBUTING.md
324
CONTRIBUTING.md
@@ -5,17 +5,46 @@ Slack][slack], reporting or triaging [issues][issues] or contributing code
|
||||
to `kured`.
|
||||
|
||||
In any case, it will make sense to familiarise yourself with the main
|
||||
[README][readme] to understand the different features and options, which is
|
||||
helpful for testing. The "building" section in particular makes sense if
|
||||
you are planning to contribute code.
|
||||
[documentation][documentation] to understand the different features and
|
||||
options, which is helpful for testing. The "building" section in
|
||||
particular makes sense if you are planning to contribute code.
|
||||
|
||||
[slack]: README.md#getting-help
|
||||
[slack]: https://github.com/kubereboot/kured/blob/main/README.md#getting-help
|
||||
[issues]: https://github.com/kubereboot/kured/issues
|
||||
[readme]: README.md
|
||||
[documentation]: https://kured.dev/docs
|
||||
|
||||
## Certificate of Origin
|
||||
## Prepare your environment
|
||||
|
||||
By contributing to this project you agree to the Developer Certificate of
|
||||
### Your IDE
|
||||
|
||||

|
||||
|
||||
The core team has access to Goland from [JetBrains][JetBrains]. Huge thanks to them for their sponsorship!
|
||||
|
||||
You can use the IDE you prefer. Don't include anything from your IDE in the .gitignore. Please do so in your global .gitignore.
|
||||
|
||||
[JetBrains]: https://www.jetbrains.com/
|
||||
|
||||
### Basic binaries required
|
||||
|
||||
Your system needs at least the following binaries installed:
|
||||
|
||||
- make
|
||||
- sed
|
||||
- find
|
||||
- bash (command, echo)
|
||||
- docker (for docker buildx)
|
||||
- go (see the required version in go.mod)
|
||||
- mise (to manage developer tools. See [mise installation instructions](https://mise.jdx.dev/installing-mise.html).)
|
||||
|
||||
### Fetch the additional binaries required
|
||||
|
||||
Please run `make install-tools` once on a fresh repository clone to download necessary developer tools.
|
||||
Installed tools are listed in [.mise directory](.mise/config.toml).
|
||||
|
||||
### Configure your git for the "Certificate of Origin"
|
||||
|
||||
By contributing to this project, you agree to the Developer Certificate of
|
||||
Origin (DCO). This document was created by the Linux Kernel community and is a
|
||||
simple statement that you, as a contributor, have the legal right to make the
|
||||
contribution.
|
||||
@@ -31,136 +60,141 @@ The signature must contain your real name
|
||||
If your `user.name` and `user.email` are configured in your Git config,
|
||||
you can sign your commit automatically with `git commit -s`.
|
||||
|
||||
## Kured Repositories
|
||||
## Get to know Kured repositories
|
||||
|
||||
All Kured repositories are kept under <https://github.com/kubereboot>. To find the code and work on the individual pieces that make Kured, here is our overview:
|
||||
|
||||
| Repositories | Contents |
|
||||
| --------------------------------------- | ------------------------- |
|
||||
|-----------------------------------------|---------------------------|
|
||||
| <https://github.com/kubereboot/kured> | Kured operator itself |
|
||||
| <https://github.com/kubereboot/charts> | Helm chart |
|
||||
| <https://github.com/kubereboot/website> | website and documentation |
|
||||
|
||||
## Regular development activities
|
||||
We use GitHub Actions in all our repositories.
|
||||
|
||||
### Prepare environment
|
||||
### Charts repo structure highlights
|
||||
|
||||
Please run `make bootstrap-tools` once on a fresh repository clone to download several needed tools, e.g. GoReleaser.
|
||||
- We use GitHub Actions to do the chart testing. Only linting/installation happens, no e2e test is done.
|
||||
- [charts/kured](https://github.com/kubereboot/charts/tree/main/charts/kured) is the place to contribute changes to the chart. Please bump [Chart.yaml](https://github.com/kubereboot/charts/blob/main/charts/kured/Chart.yaml) at each change according to semver.
|
||||
|
||||
### Kured repo structure highlights
|
||||
|
||||
Kured's main code can be found in the [`cmd`](cmd) and [`pkg`](pkg) directories
|
||||
|
||||
Keep in mind we always want to guarantee that `kured` works for the previous, current, and next
|
||||
minor version of kubernetes according to `client-go` and `kubectl` dependencies in use.
|
||||
|
||||
Our e2e tests are in the [`tests`](tests) directory. These are deep tests using our manifests with different params, on all supported k8s versions of a release.
|
||||
They are expensive but allow us to catch many issues quickly. If you want to ensure your scenario works, add an e2e test for it! Those e2e tests are encouraged by the maintainer team (See below).
|
||||
|
||||
We also have other tests:
|
||||
|
||||
- golangci-lint , shellcheck
|
||||
- a security check against our base image (alpine)
|
||||
|
||||
All these tests are run on every PR/tagged release. See [.github/workflows](.github/workflows) for more details.
|
||||
|
||||
We use [GoReleaser to build](.goreleaser.yml).
|
||||
|
||||
## Regular development activities / maintenance
|
||||
|
||||
### Updating k8s support
|
||||
|
||||
Whenever we want to update e.g. the `kubectl` or `client-go` dependencies,
|
||||
some RBAC changes might be necessary too.
|
||||
At each new major release of kubernetes, we update our dependencies.
|
||||
|
||||
This is what it took to support Kubernetes 1.14:
|
||||
<https://github.com/kubereboot/kured/pull/75>
|
||||
Beware that whenever we want to update e.g. the `kubectl` or `client-go` dependencies, some other impactful changes might be necessary too.
|
||||
(RBAC, drain behaviour changes, ...)
|
||||
|
||||
That the process can be more involved based on kubernetes changes.
|
||||
For example, k8s 1.10 changes to apps triggered the following commits:
|
||||
A few examples of what it took to support:
|
||||
|
||||
b3f9ddf: Bump client-go for optimum k8s 1.10 compatibility
|
||||
bc3f28d: Move deployment manifest to apps/v1
|
||||
908998a: Update RBAC permissions for kubectl v1.10.3
|
||||
efbb0c3: Document version compatibility in release notes
|
||||
5731b98: Add warning to Dockerfile re: upgrading kubectl
|
||||
- Kubernetes 1.10 <https://github.com/kubereboot/kured/commit/b3f9ddf> + <https://github.com/kubereboot/kured/commit/bc3f28d> + <https://github.com/kubereboot/kured/commit/908998a> + <https://github.com/kubereboot/kured/commit/efbb0c3> + <https://github.com/kubereboot/kured/commit/5731b98>
|
||||
- Kubernetes 1.14 <https://github.com/kubereboot/kured/pull/75>
|
||||
- Kubernetes 1.34 <https://github.com/kubereboot/kured/commit/6ab853dd711ee264663184976ae492a20b657b0a>
|
||||
|
||||
Search the git log for inspiration for your cases.
|
||||
|
||||
Please update our `.github/workflows` with the new k8s images, starting by
|
||||
the creation of a `.github/kind-cluster-<version>.yaml`, then updating
|
||||
our workflows with the new versions.
|
||||
In general, the following activities have to happen:
|
||||
|
||||
Once you updated everything, make sure you update the support matrix on
|
||||
the main [README][readme] as well.
|
||||
- Bump kind and its images (see below)
|
||||
- `go get k8s.io/kubectl@v0.{version}`
|
||||
|
||||
### bump kind images support
|
||||
|
||||
Go to `.github/workflows` and update the new k8s images. For that:
|
||||
|
||||
- `cp .github/kind-cluster-current.yaml .github/kind-cluster-previous.yaml`
|
||||
- `cp .github/kind-cluster-next.yaml .github/kind-cluster-current.yaml`
|
||||
- Then edit `.github/kind-cluster-next.yaml` to point to the new version.
|
||||
|
||||
This will make the full test matrix updated (the CI and the test code).
|
||||
|
||||
Once your code passes all tests, update the support matrix in
|
||||
the [installation docs](https://kured.dev/docs/installation/).
|
||||
|
||||
Beware that sometimes you also need to update the Kind version. grep in the [.github/workflows](.github/workflows) for the kind version.
|
||||
|
||||
### Updating other dependencies
|
||||
|
||||
Dependabot proposes changes in our go.mod/go.sum.
|
||||
Some of those changes are covered by CI testing, some are not.
|
||||
Dependabot proposes changes in our `go.mod`/`go.sum`.
|
||||
CI testing covers some of those changes, but not all.
|
||||
|
||||
Please make sure to test those not covered by CI (mostly the integration
|
||||
with other tools) manually before merging.
|
||||
|
||||
In all cases, review dependabot changes: Imagine all changes as a possible supply chain
|
||||
attack vector. You then need to review the proposed changes by dependabot and evaluate the trust/risks.
|
||||
|
||||
### Review periodic jobs
|
||||
|
||||
We run periodic jobs (see also Automated testing section of this documentation).
|
||||
Those should be monitored for failures.
|
||||
|
||||
If a failure happen in periodics, something terribly wrong must have happened
|
||||
(or github is failing at the creation of a kind cluster). Please monitor those
|
||||
If a failure happens in periodic testing, something terribly wrong must have happened
|
||||
(or GitHub is failing at the creation of a kind cluster). Please monitor those
|
||||
failures carefully.
|
||||
|
||||
### Introducing new features
|
||||
## Testing kured
|
||||
|
||||
When you introduce a new feature, the kured team expects you to have tested
|
||||
your change thoroughly. If possible, include all the necessary testing in your change.
|
||||
If you have developed anything (or just want to take kured for a spin!), run the following tests.
|
||||
As they will run in CI, we will quickly catch if you did not test before submitting your PR.
|
||||
|
||||
If your change involves a user facing change (change in flags of kured for example),
|
||||
please include expose your new feature in our default manifest (`kured-ds.yaml`),
|
||||
as a comment.
|
||||
### Linting
|
||||
|
||||
Our release manifests and helm charts are our stable interfaces.
|
||||
Any user facing changes will therefore have to wait for a release before being
|
||||
exposed to our users.
|
||||
We use [`golangci-lint`](https://golangci-lint.run/) for Go code linting.
|
||||
|
||||
This also means that when you expose a new feature, you should create another PR
|
||||
for your changes in <https://github.com/kubereboot/charts> to make your feature
|
||||
available at the next kured version for helm users.
|
||||
To run lint checks locally:
|
||||
|
||||
In the charts PR, you can directly bump the appVersion to the next minor version
|
||||
(you are introducing a new feature, which requires a bump of the minor number.
|
||||
For example, if current appVersion is 1.6.x, make sure you update your appVersion
|
||||
to 1.7.0). It allows us to have an easy view of what we land each release.
|
||||
```bash
|
||||
make lint
|
||||
```
|
||||
|
||||
Do not hesitate to increase the test coverage for your feature, whether it's unit
|
||||
testing to full functional testing (even using helm charts)
|
||||
### Quick Golang code testing
|
||||
|
||||
### Increasing test coverage
|
||||
Please run `make test` to run only the basic tests. It gives a good
|
||||
idea of the code behaviour.
|
||||
|
||||
We are welcoming any change to increase our test coverage.
|
||||
See also our github issues for the label `testing`.
|
||||
### Functional testing
|
||||
|
||||
## Automated testing
|
||||
For functional testing, the maintainer team is using `minikube` or `kind` (explained below), but also encourages you to test kured on your own cluster(s).
|
||||
|
||||
Our CI is covered by github actions.
|
||||
You can see their contents in .github/workflows.
|
||||
#### Testing on your own cluster
|
||||
|
||||
We currently run:
|
||||
This will allow the community to catch issues that might not have been tested in our CI, like integration with other tools, or your specific use case.
|
||||
|
||||
- go tests and lint
|
||||
- `shellcheck`
|
||||
- a check for dead links in our docs
|
||||
- a security check against our base image (alpine)
|
||||
- a deep functional test using our manifests on all supported k8s versions
|
||||
|
||||
To test your code manually, follow the section Manual testing.
|
||||
|
||||
## Manual (release) testing
|
||||
|
||||
Before `kured` is released, we want to make sure it still works fine on the
|
||||
previous, current and next minor version of Kubernetes (with respect to the
|
||||
`client-go` & `kubectl` dependencies in use). For local testing e.g.
|
||||
`minikube` or `kind` can be sufficient. This will allow you to catch issues
|
||||
that might not have been tested in our CI, like integration with other tools,
|
||||
or your specific use case.
|
||||
|
||||
Deploy kured in your test scenario, make sure you pass the right `image`,
|
||||
update the e.g. `period` and `reboot-days` options, so you get immediate
|
||||
results, if you login to a node and run:
|
||||
To test kured on your own cluster, make sure you pass the right `image`, update the `period` and `reboot-days` (so you get immediate results), and update any other flags for your cases.
|
||||
Then login to a node and run:
|
||||
|
||||
```console
|
||||
sudo touch /var/run/reboot-required
|
||||
```
|
||||
|
||||
### Example of golang testing
|
||||
Then tell us about everything that went well or went wrong in Slack.
|
||||
|
||||
Please run `make test`. You should have `golint` installed.
|
||||
|
||||
### Example of testing with `minikube`
|
||||
#### Testing with `minikube`
|
||||
|
||||
A test-run with `minikube` could look like this:
|
||||
|
||||
```console
|
||||
```cli
|
||||
# start minikube
|
||||
minikube start --driver=kvm2 --kubernetes-version <k8s-release>
|
||||
|
||||
@@ -183,16 +217,16 @@ minikube logs -f
|
||||
|
||||
Now check for the 'Commanding reboot' message and minikube going down.
|
||||
|
||||
Unfortunately as of today, you are going to run into
|
||||
Unfortunately, as of today, you are going to run into
|
||||
<https://github.com/kubernetes/minikube/issues/2874>. This means that
|
||||
minikube won't come back easily. You will need to start minikube again.
|
||||
Then you can check for the lock release.
|
||||
|
||||
### Example of testing with `kind`
|
||||
#### Testing with `kind` "The hard way"
|
||||
|
||||
A test-run with `kind` could look like this:
|
||||
|
||||
```console
|
||||
```cli
|
||||
# create kind cluster
|
||||
kind create cluster --config .github/kind-cluster-<k8s-version>.yaml
|
||||
|
||||
@@ -204,38 +238,120 @@ kind create cluster --config .github/kind-cluster-<k8s-version>.yaml
|
||||
|
||||
```
|
||||
|
||||
### Testing with `kind` "The easy way"
|
||||
|
||||
You can automate the test with `kind` by using the same code as the CI.
|
||||
|
||||
```cli
|
||||
# Build kured:dev image, build manifests, and run the "long" go tests
|
||||
make e2e-test
|
||||
```
|
||||
|
||||
You can alter the test behaviour by passing arguments to this command.
|
||||
A few examples are given below:
|
||||
|
||||
```shell
|
||||
# Run only TestE2EWithSignal test for the kubernetes version named "current" (see kind file)
|
||||
make e2e-test ARGS="-run ^TestE2EWithSignal/current"
|
||||
# Run all tests but make sure to extend the timeout, for slower machines.
|
||||
make e2e-test ARGS="-timeout 1200s'
|
||||
```
|
||||
|
||||
## Introducing new features
|
||||
|
||||
When you introduce a new feature, the kured team expects you to have tested (see above!)
|
||||
your change thoroughly. If possible, include all the necessary testing in your change.
|
||||
|
||||
If your change involves a user facing change (change in flags of kured for example),
|
||||
please include expose your new feature in our default manifest (`kured-ds.yaml`),
|
||||
as a comment.
|
||||
|
||||
Our release manifests and helm charts are our stable interfaces.
|
||||
Any user facing changes will therefore have to wait for a release before being
|
||||
exposed to our users.
|
||||
|
||||
This also means that when you expose a new feature, you should create another PR
|
||||
for your changes in <https://github.com/kubereboot/charts> to make your feature
|
||||
available at the next kured version for helm users.
|
||||
|
||||
In the charts PR, you can directly bump the `appVersion` to the next minor version
|
||||
(you are introducing a new feature, which requires a bump of the minor number.
|
||||
For example, if current `appVersion` is `1.6.x`, make sure you update your `appVersion`
|
||||
to `1.7.0`). It allows us to have an easy view of what we land each release.
|
||||
|
||||
Do not hesitate to increase the test coverage for your feature, whether it's unit testing
|
||||
to full functional testing (even using helm charts).
|
||||
|
||||
The team of kured is small, so we will most likely refuse any feature adding maintenance burden.
|
||||
|
||||
## Introducing changes in the helm chart
|
||||
|
||||
When you change the helm chart, remember to bump its version according to semver.
|
||||
Changes to defaults are frowned upon unless absolutely necessary.
|
||||
|
||||
## Introducing new tests / increasing test coverage
|
||||
|
||||
On the opposite of features, we welcome ALL features increasing our stability and test coverage.
|
||||
See also our GitHub issues with the label [`testing`](https://github.com/kubereboot/kured/labels/testing).
|
||||
|
||||
## Publishing a new kured release
|
||||
|
||||
### Prepare Documentation
|
||||
### Double-check the latest kubernetes patch version
|
||||
|
||||
Check that [compatibility matrix](https://kured.dev/docs/installation/) is updated
|
||||
to the new version you want to release.
|
||||
Ensure you have used the latest patch version in the tree.
|
||||
Check the documentation "Updating k8s support" if the minor version was not yet applied.
|
||||
|
||||
### Create a tag on the repo
|
||||
|
||||
Before going further, we should freeze the code for a release, by
|
||||
tagging the code. The Github-Action should start a new job and push
|
||||
the new image to the registry.
|
||||
|
||||
### Create the combined manifest
|
||||
|
||||
Now create the `kured-<release>-dockerhub.yaml` for e.g. `1.3.0`:
|
||||
### Update the manifests with the new version
|
||||
|
||||
```sh
|
||||
VERSION=1.3.0
|
||||
MANIFEST="kured-$VERSION-dockerhub.yaml"
|
||||
export VERSION=1.20.0
|
||||
make DH_ORG="kubereboot" VERSION="${VERSION}" manifest
|
||||
```
|
||||
Create a commit updating the manifest with future image [like this one](https://github.com/kubereboot/kured/commit/58091f6145771f426b4b9e012a43a9c847af2560).
|
||||
|
||||
### Create the combined manifest for the new release
|
||||
|
||||
Now create the `kured-<new version>-combined.yaml` for e.g. `1.20.0`:
|
||||
|
||||
```sh
|
||||
export VERSION=1.20.0
|
||||
export MANIFEST="kured-$VERSION-combined.yaml"
|
||||
make DH_ORG="kubereboot" VERSION="${VERSION}" manifest # just to be safe
|
||||
cat kured-rbac.yaml > "$MANIFEST"
|
||||
cat kured-ds.yaml >> "$MANIFEST"
|
||||
```
|
||||
|
||||
### Publish release artifacts
|
||||
### Create the new version tag on the repo (optional, can also be done directly in GH web interface)
|
||||
|
||||
Now you can head to the Github UI, use the version number as tag and upload the
|
||||
`kured-<release>-dockerhub.yaml` file.
|
||||
Tag the previously created commit with the future release version.
|
||||
The GitHub Actions workflow will push the new image to the registry.
|
||||
|
||||
### Publish new version release artifacts
|
||||
|
||||
Now you can head to the GitHub UI for releases, drafting a new
|
||||
release. Chose, as tag, the new version number.
|
||||
|
||||
Click to generate the release notes.
|
||||
|
||||
Fill, as name, "Kured <new version>".
|
||||
|
||||
Edit the generated text.
|
||||
|
||||
Please describe what's new and noteworthy in the release notes, list the PRs
|
||||
that landed and give a shout-out to everyone who contributed.
|
||||
|
||||
Please also note down on which releases the upcoming `kured` release was
|
||||
tested on. (Check old release notes if you're unsure.)
|
||||
tested on or what it supports. (Check old release notes if you're unsure.)
|
||||
|
||||
Before clicking on publishing release, upload the yaml manifest
|
||||
(`kured-<new version>-combined.yaml`) file.
|
||||
|
||||
Click on publish the release and set as the latest release.
|
||||
|
||||
### Prepare Helm chart
|
||||
|
||||
Create a commit to [bump the chart and kured version like this one](https://github.com/kubereboot/charts/commit/e0191d91c21db8338be8cbe56f8991a557048110).
|
||||
|
||||
### Prepare Documentation
|
||||
|
||||
Ensure the [compatibility matrix](https://kured.dev/docs/installation/) is updated to the new version you want to release.
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM --platform=$TARGETPLATFORM alpine:3.19.0 as bin
|
||||
FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS bin
|
||||
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
@@ -19,7 +19,7 @@ RUN set -ex \
|
||||
esac \
|
||||
&& cp /dist/kured_${TARGETOS}_${TARGETARCH}${SUFFIX}/kured /dist/kured;
|
||||
|
||||
FROM --platform=$TARGETPLATFORM alpine:3.19.0
|
||||
FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659
|
||||
RUN apk update --no-cache && apk upgrade --no-cache && apk add --no-cache ca-certificates tzdata
|
||||
COPY --from=bin /dist/kured /usr/bin/kured
|
||||
ENTRYPOINT ["/usr/bin/kured"]
|
||||
|
||||
13
MAINTAINERS
13
MAINTAINERS
@@ -1,5 +1,12 @@
|
||||
Christian Hopf <christian.kotzbauer@gmail.com> (@ckotzbauer)
|
||||
Daniel Holbach <daniel.holbach@gmail.com> (@dholbach)
|
||||
Hidde Beydals <hidde@weave.works> (@hiddeco)
|
||||
In alphabetical order:
|
||||
|
||||
Dharsan Baskar <git@dharsanb.com> (@dharsanb)
|
||||
Jack Francis <jackfrancis@gmail.com> (@jackfrancis)
|
||||
Jean-Philippe Evrard <open-source@a.spamming.party> (@evrardjp)
|
||||
|
||||
Retired maintainers:
|
||||
|
||||
- Daniel Holbach
|
||||
- Christian Hopf
|
||||
|
||||
Thank you for your involvement, and let us not say "farewell" ...
|
||||
|
||||
68
Makefile
68
Makefile
@@ -1,41 +1,53 @@
|
||||
.DEFAULT: all
|
||||
.PHONY: all clean image minikube-publish manifest test kured-all
|
||||
.PHONY: all clean image minikube-publish manifest test kured-all lint
|
||||
|
||||
TEMPDIR=./.tmp
|
||||
GORELEASER_CMD=$(TEMPDIR)/goreleaser
|
||||
DH_ORG=kubereboot
|
||||
DH_ORG ?= kubereboot
|
||||
VERSION=$(shell git rev-parse --short HEAD)
|
||||
SUDO=$(shell docker info >/dev/null 2>&1 || echo "sudo -E")
|
||||
|
||||
all: image
|
||||
|
||||
$(TEMPDIR):
|
||||
mkdir -p $(TEMPDIR)
|
||||
|
||||
.PHONY: bootstrap-tools
|
||||
bootstrap-tools: $(TEMPDIR)
|
||||
VERSION=v1.19.2 TMPDIR=.tmp bash .github/scripts/goreleaser-install.sh
|
||||
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b .tmp v0.86.1
|
||||
curl -sSfL https://github.com/sigstore/cosign/releases/download/v2.1.1/cosign-linux-amd64 -o .tmp/cosign
|
||||
chmod +x .tmp/goreleaser .tmp/cosign .tmp/syft
|
||||
.PHONY: install-tools
|
||||
install-tools:
|
||||
command -v mise 2>&1 || { echo "please install mise to continue" >&2; exit 127; }
|
||||
mise install
|
||||
|
||||
clean:
|
||||
rm -rf ./dist
|
||||
|
||||
kured:
|
||||
$(GORELEASER_CMD) build --clean --single-target --snapshot
|
||||
goreleaser build --clean --single-target --snapshot
|
||||
|
||||
kured-all:
|
||||
$(GORELEASER_CMD) build --clean --snapshot
|
||||
goreleaser build --clean --snapshot
|
||||
|
||||
kured-release-tag:
|
||||
$(GORELEASER_CMD) release --clean
|
||||
goreleaser release --clean
|
||||
|
||||
kured-release-snapshot:
|
||||
$(GORELEASER_CMD) release --clean --snapshot
|
||||
goreleaser release --clean --snapshot
|
||||
|
||||
image: kured
|
||||
$(SUDO) docker buildx build --load -t ghcr.io/$(DH_ORG)/kured:$(VERSION) .
|
||||
$(SUDO) docker buildx build --no-cache --load -t ghcr.io/$(DH_ORG)/kured:$(VERSION) .
|
||||
|
||||
dev-image: image
|
||||
$(SUDO) docker tag ghcr.io/$(DH_ORG)/kured:$(VERSION) kured:dev
|
||||
|
||||
dev-manifest:
|
||||
# basic e2e scenario
|
||||
sed -e "s#image: ghcr.io/.*kured.*#image: kured:dev#g" -e 's/#\(.*\)--period=1h/\1--period=20s/g' kured-ds.yaml > tests/kind/testfiles/kured-ds.yaml
|
||||
# signal e2e scenario
|
||||
sed -e "s#image: ghcr.io/.*kured.*#image: kured:dev#g" -e 's/#\(.*\)--period=1h/\1--period=20s/g' kured-ds-signal.yaml > tests/kind/testfiles/kured-ds-signal.yaml
|
||||
# concurrency e2e command scenario
|
||||
sed -e "s#image: ghcr.io/.*kured.*#image: kured:dev#g" -e 's/#\(.*\)--period=1h/\1--period=20s/g' -e 's/#\(.*\)--concurrency=1/\1--concurrency=2/g' kured-ds.yaml > tests/kind/testfiles/kured-ds-concurrent-command.yaml
|
||||
# concurrency e2e signal scenario
|
||||
sed -e "s#image: ghcr.io/.*kured.*#image: kured:dev#g" -e 's/#\(.*\)--period=1h/\1--period=20s/g' -e 's/#\(.*\)--concurrency=1/\1--concurrency=2/g' kured-ds-signal.yaml > tests/kind/testfiles/kured-ds-concurrent-signal.yaml
|
||||
# pod blocker e2e signal scenario
|
||||
sed -e "s#image: ghcr.io/.*kured.*#image: kured:dev#g" -e 's/#\(.*\)--period=1h/\1--period=20s/g' -e 's/#\(.*\)--blocking-pod-selector=name=temperamental/\1--blocking-pod-selector=app=blocker/g' kured-ds-signal.yaml > tests/kind/testfiles/kured-ds-podblocker.yaml
|
||||
|
||||
e2e-test: dev-manifest dev-image
|
||||
echo "Running ALL go tests"
|
||||
go test -count=1 -v --parallel 4 ./... $(ARGS)
|
||||
|
||||
minikube-publish: image
|
||||
$(SUDO) docker save ghcr.io/$(DH_ORG)/kured | (eval $$(minikube docker-env) && docker load)
|
||||
@@ -45,10 +57,16 @@ manifest:
|
||||
sed -i "s#image: ghcr.io/.*kured.*#image: ghcr.io/$(DH_ORG)/kured:$(VERSION)#g" kured-ds-signal.yaml
|
||||
echo "Please generate combined manifest if necessary"
|
||||
|
||||
test:
|
||||
echo "Running go tests"
|
||||
go test ./...
|
||||
echo "Running golint on pkg"
|
||||
golint ./pkg/...
|
||||
echo "Running golint on cmd"
|
||||
golint ./cmd/...
|
||||
test: lint
|
||||
@echo "Running short go tests"
|
||||
go test -test.short -json ./... > test.json
|
||||
|
||||
lint:
|
||||
@echo "Running shellcheck"
|
||||
find . -name '*.sh' | xargs -n1 shellcheck
|
||||
@echo "Running golangci-lint..."
|
||||
golangci-lint run ./...
|
||||
|
||||
lint-docs:
|
||||
@echo "Running lychee"
|
||||
mise x lychee@latest -- lychee --verbose --no-progress '*.md' '*.yaml' '*/*/*.go' --exclude-link-local
|
||||
@@ -3,8 +3,9 @@
|
||||
[](https://artifacthub.io/packages/helm/kured/kured)
|
||||
[](https://app.fossa.com/projects/git%2Bgithub.com%2Fkubereboot%2Fkured?ref=badge_shield)
|
||||
[](https://clomonitor.io/projects/cncf/kured)
|
||||
[](https://www.bestpractices.dev/projects/8867)
|
||||
|
||||
<img src="https://github.com/kubereboot/website/raw/main/static/img/kured.png" width="200" align="right"/>
|
||||
<img src="https://github.com/kubereboot/website/raw/main/static/img/kured.png" alt="kured logo" width="200" align="right"/>
|
||||
|
||||
- [kured - Kubernetes Reboot Daemon](#kured---kubernetes-reboot-daemon)
|
||||
- [Introduction](#introduction)
|
||||
@@ -59,7 +60,7 @@ Your feedback is always welcome!
|
||||
|
||||

|
||||
|
||||
The Linux Foundation® (TLF) has registered trademarks and uses trademarks. For a list of TLF trademarks, see [Trademark Usage](https://www.linuxfoundation.org/trademark-usage/).
|
||||
The Linux Foundation® (TLF) has registered trademarks and uses trademarks. For a list of TLF trademarks, see [Trademark Usage](https://www.linuxfoundation.org/legal/trademark-usage).
|
||||
|
||||
## License
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// The main controller for kured
|
||||
// This package is a reference implementation on how to reboot your nodes based on the different
|
||||
// tools present in this project's modules
|
||||
package main
|
||||
|
||||
import (
|
||||
@@ -8,37 +11,32 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containrrr/shoutrrr"
|
||||
"github.com/kubereboot/kured/internal"
|
||||
"github.com/kubereboot/kured/pkg/blockers"
|
||||
"github.com/kubereboot/kured/pkg/checkers"
|
||||
"github.com/kubereboot/kured/pkg/daemonsetlock"
|
||||
"github.com/kubereboot/kured/pkg/delaytick"
|
||||
"github.com/kubereboot/kured/pkg/reboot"
|
||||
"github.com/kubereboot/kured/pkg/taints"
|
||||
"github.com/kubereboot/kured/pkg/timewindow"
|
||||
papi "github.com/prometheus/client_golang/api"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
flag "github.com/spf13/pflag"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
kubectldrain "k8s.io/kubectl/pkg/drain"
|
||||
|
||||
"github.com/google/shlex"
|
||||
|
||||
shoutrrr "github.com/containrrr/shoutrrr"
|
||||
"github.com/kubereboot/kured/pkg/alerts"
|
||||
"github.com/kubereboot/kured/pkg/daemonsetlock"
|
||||
"github.com/kubereboot/kured/pkg/delaytick"
|
||||
"github.com/kubereboot/kured/pkg/reboot"
|
||||
"github.com/kubereboot/kured/pkg/taints"
|
||||
"github.com/kubereboot/kured/pkg/timewindow"
|
||||
"github.com/kubereboot/kured/pkg/util"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -63,7 +61,7 @@ var (
|
||||
lockReleaseDelay time.Duration
|
||||
prometheusURL string
|
||||
preferNoScheduleTaintName string
|
||||
alertFilter *regexp.Regexp
|
||||
alertFilter regexpValue
|
||||
alertFilterMatchOnly bool
|
||||
alertFiringOnly bool
|
||||
rebootSentinelFile string
|
||||
@@ -108,11 +106,6 @@ const (
|
||||
// EnvPrefix The environment variable prefix of all environment variables bound to our command line flags.
|
||||
EnvPrefix = "KURED"
|
||||
|
||||
// MethodCommand is used as "--reboot-method" value when rebooting with the configured "--reboot-command"
|
||||
MethodCommand = "command"
|
||||
// MethodSignal is used as "--reboot-method" value when rebooting with a SIGRTMIN+5 signal.
|
||||
MethodSignal = "signal"
|
||||
|
||||
sigTrminPlus5 = 34 + 5
|
||||
)
|
||||
|
||||
@@ -121,138 +114,185 @@ func init() {
|
||||
}
|
||||
|
||||
func main() {
|
||||
cmd := NewRootCommand()
|
||||
|
||||
if err := cmd.Execute(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// NewRootCommand construct the Cobra root command
|
||||
func NewRootCommand() *cobra.Command {
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "kured",
|
||||
Short: "Kubernetes Reboot Daemon",
|
||||
PersistentPreRunE: bindViper,
|
||||
PreRun: flagCheck,
|
||||
Run: root}
|
||||
|
||||
rootCmd.PersistentFlags().StringVar(&nodeID, "node-id", "",
|
||||
flag.StringVar(&nodeID, "node-id", "",
|
||||
"node name kured runs on, should be passed down from spec.nodeName via KURED_NODE_ID environment variable")
|
||||
rootCmd.PersistentFlags().BoolVar(&forceReboot, "force-reboot", false,
|
||||
flag.BoolVar(&forceReboot, "force-reboot", false,
|
||||
"force a reboot even if the drain fails or times out")
|
||||
rootCmd.PersistentFlags().StringVar(&metricsHost, "metrics-host", "",
|
||||
flag.StringVar(&metricsHost, "metrics-host", "",
|
||||
"host where metrics will listen")
|
||||
rootCmd.PersistentFlags().IntVar(&metricsPort, "metrics-port", 8080,
|
||||
flag.IntVar(&metricsPort, "metrics-port", 8080,
|
||||
"port number where metrics will listen")
|
||||
rootCmd.PersistentFlags().IntVar(&drainGracePeriod, "drain-grace-period", -1,
|
||||
flag.IntVar(&drainGracePeriod, "drain-grace-period", -1,
|
||||
"time in seconds given to each pod to terminate gracefully, if negative, the default value specified in the pod will be used")
|
||||
rootCmd.PersistentFlags().StringVar(&drainPodSelector, "drain-pod-selector", "",
|
||||
flag.StringVar(&drainPodSelector, "drain-pod-selector", "",
|
||||
"only drain pods with labels matching the selector (default: '', all pods)")
|
||||
rootCmd.PersistentFlags().IntVar(&skipWaitForDeleteTimeoutSeconds, "skip-wait-for-delete-timeout", 0,
|
||||
flag.IntVar(&skipWaitForDeleteTimeoutSeconds, "skip-wait-for-delete-timeout", 0,
|
||||
"when seconds is greater than zero, skip waiting for the pods whose deletion timestamp is older than N seconds while draining a node")
|
||||
rootCmd.PersistentFlags().DurationVar(&drainDelay, "drain-delay", 0,
|
||||
flag.DurationVar(&drainDelay, "drain-delay", 0,
|
||||
"delay drain for this duration (default: 0, disabled)")
|
||||
rootCmd.PersistentFlags().DurationVar(&drainTimeout, "drain-timeout", 0,
|
||||
flag.DurationVar(&drainTimeout, "drain-timeout", 0,
|
||||
"timeout after which the drain is aborted (default: 0, infinite time)")
|
||||
rootCmd.PersistentFlags().DurationVar(&rebootDelay, "reboot-delay", 0,
|
||||
flag.DurationVar(&rebootDelay, "reboot-delay", 0,
|
||||
"delay reboot for this duration (default: 0, disabled)")
|
||||
rootCmd.PersistentFlags().StringVar(&rebootMethod, "reboot-method", "command",
|
||||
flag.StringVar(&rebootMethod, "reboot-method", "command",
|
||||
"method to use for reboots. Available: command")
|
||||
rootCmd.PersistentFlags().DurationVar(&period, "period", time.Minute*60,
|
||||
flag.DurationVar(&period, "period", time.Minute*60,
|
||||
"sentinel check period")
|
||||
rootCmd.PersistentFlags().StringVar(&dsNamespace, "ds-namespace", "kube-system",
|
||||
flag.StringVar(&dsNamespace, "ds-namespace", "kube-system",
|
||||
"namespace containing daemonset on which to place lock")
|
||||
rootCmd.PersistentFlags().StringVar(&dsName, "ds-name", "kured",
|
||||
flag.StringVar(&dsName, "ds-name", "kured",
|
||||
"name of daemonset on which to place lock")
|
||||
rootCmd.PersistentFlags().StringVar(&lockAnnotation, "lock-annotation", KuredNodeLockAnnotation,
|
||||
flag.StringVar(&lockAnnotation, "lock-annotation", KuredNodeLockAnnotation,
|
||||
"annotation in which to record locking node")
|
||||
rootCmd.PersistentFlags().DurationVar(&lockTTL, "lock-ttl", 0,
|
||||
flag.DurationVar(&lockTTL, "lock-ttl", 0,
|
||||
"expire lock annotation after this duration (default: 0, disabled)")
|
||||
rootCmd.PersistentFlags().DurationVar(&lockReleaseDelay, "lock-release-delay", 0,
|
||||
flag.DurationVar(&lockReleaseDelay, "lock-release-delay", 0,
|
||||
"delay lock release for this duration (default: 0, disabled)")
|
||||
rootCmd.PersistentFlags().StringVar(&prometheusURL, "prometheus-url", "",
|
||||
flag.StringVar(&prometheusURL, "prometheus-url", "",
|
||||
"Prometheus instance to probe for active alerts")
|
||||
rootCmd.PersistentFlags().Var(®expValue{&alertFilter}, "alert-filter-regexp",
|
||||
flag.Var(&alertFilter, "alert-filter-regexp",
|
||||
"alert names to ignore when checking for active alerts")
|
||||
rootCmd.PersistentFlags().BoolVar(&alertFilterMatchOnly, "alert-filter-match-only", false,
|
||||
flag.BoolVar(&alertFilterMatchOnly, "alert-filter-match-only", false,
|
||||
"Only block if the alert-filter-regexp matches active alerts")
|
||||
rootCmd.PersistentFlags().BoolVar(&alertFiringOnly, "alert-firing-only", false,
|
||||
flag.BoolVar(&alertFiringOnly, "alert-firing-only", false,
|
||||
"only consider firing alerts when checking for active alerts")
|
||||
rootCmd.PersistentFlags().StringVar(&rebootSentinelFile, "reboot-sentinel", "/var/run/reboot-required",
|
||||
flag.StringVar(&rebootSentinelFile, "reboot-sentinel", "/var/run/reboot-required",
|
||||
"path to file whose existence triggers the reboot command")
|
||||
rootCmd.PersistentFlags().StringVar(&preferNoScheduleTaintName, "prefer-no-schedule-taint", "",
|
||||
flag.StringVar(&preferNoScheduleTaintName, "prefer-no-schedule-taint", "",
|
||||
"Taint name applied during pending node reboot (to prevent receiving additional pods from other rebooting nodes). Disabled by default. Set e.g. to \"weave.works/kured-node-reboot\" to enable tainting.")
|
||||
rootCmd.PersistentFlags().StringVar(&rebootSentinelCommand, "reboot-sentinel-command", "",
|
||||
flag.StringVar(&rebootSentinelCommand, "reboot-sentinel-command", "",
|
||||
"command for which a zero return code will trigger a reboot command")
|
||||
rootCmd.PersistentFlags().StringVar(&rebootCommand, "reboot-command", "/bin/systemctl reboot",
|
||||
flag.StringVar(&rebootCommand, "reboot-command", "/bin/systemctl reboot",
|
||||
"command to run when a reboot is required")
|
||||
rootCmd.PersistentFlags().IntVar(&concurrency, "concurrency", 1,
|
||||
flag.IntVar(&concurrency, "concurrency", 1,
|
||||
"amount of nodes to concurrently reboot. Defaults to 1")
|
||||
rootCmd.PersistentFlags().IntVar(&rebootSignal, "reboot-signal", sigTrminPlus5,
|
||||
flag.IntVar(&rebootSignal, "reboot-signal", sigTrminPlus5,
|
||||
"signal to use for reboot, SIGRTMIN+5 by default.")
|
||||
|
||||
rootCmd.PersistentFlags().StringVar(&slackHookURL, "slack-hook-url", "",
|
||||
flag.StringVar(&slackHookURL, "slack-hook-url", "",
|
||||
"slack hook URL for reboot notifications [deprecated in favor of --notify-url]")
|
||||
rootCmd.PersistentFlags().StringVar(&slackUsername, "slack-username", "kured",
|
||||
flag.StringVar(&slackUsername, "slack-username", "kured",
|
||||
"slack username for reboot notifications")
|
||||
rootCmd.PersistentFlags().StringVar(&slackChannel, "slack-channel", "",
|
||||
flag.StringVar(&slackChannel, "slack-channel", "",
|
||||
"slack channel for reboot notifications")
|
||||
rootCmd.PersistentFlags().StringVar(¬ifyURL, "notify-url", "",
|
||||
flag.StringVar(¬ifyURL, "notify-url", "",
|
||||
"notify URL for reboot notifications (cannot use with --slack-hook-url flags)")
|
||||
rootCmd.PersistentFlags().StringVar(&messageTemplateUncordon, "message-template-uncordon", "Node %s rebooted & uncordoned successfully!",
|
||||
flag.StringVar(&messageTemplateUncordon, "message-template-uncordon", "Node %s rebooted & uncordoned successfully!",
|
||||
"message template used to notify about a node being successfully uncordoned")
|
||||
rootCmd.PersistentFlags().StringVar(&messageTemplateDrain, "message-template-drain", "Draining node %s",
|
||||
flag.StringVar(&messageTemplateDrain, "message-template-drain", "Draining node %s",
|
||||
"message template used to notify about a node being drained")
|
||||
rootCmd.PersistentFlags().StringVar(&messageTemplateReboot, "message-template-reboot", "Rebooting node %s",
|
||||
flag.StringVar(&messageTemplateReboot, "message-template-reboot", "Rebooting node %s",
|
||||
"message template used to notify about a node being rebooted")
|
||||
|
||||
rootCmd.PersistentFlags().StringArrayVar(&podSelectors, "blocking-pod-selector", nil,
|
||||
flag.StringArrayVar(&podSelectors, "blocking-pod-selector", nil,
|
||||
"label selector identifying pods whose presence should prevent reboots")
|
||||
|
||||
rootCmd.PersistentFlags().StringSliceVar(&rebootDays, "reboot-days", timewindow.EveryDay,
|
||||
flag.StringSliceVar(&rebootDays, "reboot-days", timewindow.EveryDay,
|
||||
"schedule reboot on these days")
|
||||
rootCmd.PersistentFlags().StringVar(&rebootStart, "start-time", "0:00",
|
||||
flag.StringVar(&rebootStart, "start-time", "0:00",
|
||||
"schedule reboot only after this time of day")
|
||||
rootCmd.PersistentFlags().StringVar(&rebootEnd, "end-time", "23:59:59",
|
||||
flag.StringVar(&rebootEnd, "end-time", "23:59:59",
|
||||
"schedule reboot only before this time of day")
|
||||
rootCmd.PersistentFlags().StringVar(&timezone, "time-zone", "UTC",
|
||||
flag.StringVar(&timezone, "time-zone", "UTC",
|
||||
"use this timezone for schedule inputs")
|
||||
|
||||
rootCmd.PersistentFlags().BoolVar(&annotateNodes, "annotate-nodes", false,
|
||||
flag.BoolVar(&annotateNodes, "annotate-nodes", false,
|
||||
"if set, the annotations 'weave.works/kured-reboot-in-progress' and 'weave.works/kured-most-recent-reboot-needed' will be given to nodes undergoing kured reboots")
|
||||
|
||||
rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "text",
|
||||
flag.StringVar(&logFormat, "log-format", "text",
|
||||
"use text or json log format")
|
||||
|
||||
rootCmd.PersistentFlags().StringSliceVar(&preRebootNodeLabels, "pre-reboot-node-labels", nil,
|
||||
flag.StringSliceVar(&preRebootNodeLabels, "pre-reboot-node-labels", nil,
|
||||
"labels to add to nodes before cordoning")
|
||||
rootCmd.PersistentFlags().StringSliceVar(&postRebootNodeLabels, "post-reboot-node-labels", nil,
|
||||
flag.StringSliceVar(&postRebootNodeLabels, "post-reboot-node-labels", nil,
|
||||
"labels to add to nodes after uncordoning")
|
||||
|
||||
return rootCmd
|
||||
flag.Parse()
|
||||
|
||||
// Load flags from environment variables
|
||||
LoadFromEnv()
|
||||
|
||||
log.Infof("Kubernetes Reboot Daemon: %s", version)
|
||||
|
||||
if logFormat == "json" {
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
}
|
||||
|
||||
if nodeID == "" {
|
||||
log.Fatal("KURED_NODE_ID environment variable required")
|
||||
}
|
||||
log.Infof("Node ID: %s", nodeID)
|
||||
|
||||
notifyURL = validateNotificationURL(notifyURL, slackHookURL)
|
||||
|
||||
err := validateNodeLabels(preRebootNodeLabels, postRebootNodeLabels)
|
||||
if err != nil {
|
||||
log.Warn(err.Error())
|
||||
}
|
||||
|
||||
log.Infof("PreferNoSchedule taint: %s", preferNoScheduleTaintName)
|
||||
|
||||
// This should be printed from blocker list instead of only blocking pod selectors
|
||||
log.Infof("Blocking Pod Selectors: %v", podSelectors)
|
||||
|
||||
log.Infof("Reboot period %v", period)
|
||||
log.Infof("Concurrency: %v", concurrency)
|
||||
|
||||
if annotateNodes {
|
||||
log.Infof("Will annotate nodes during kured reboot operations")
|
||||
}
|
||||
|
||||
// Now call the rest of the main loop.
|
||||
window, err := timewindow.New(rebootDays, rebootStart, rebootEnd, timezone)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to build time window: %v", err)
|
||||
}
|
||||
log.Infof("Reboot schedule: %v", window)
|
||||
|
||||
log.Infof("Reboot method: %s", rebootMethod)
|
||||
rebooter, err := internal.NewRebooter(rebootMethod, rebootCommand, rebootSignal)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to build rebooter: %v", err)
|
||||
}
|
||||
|
||||
rebootChecker, err := internal.NewRebootChecker(rebootSentinelCommand, rebootSentinelFile)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to build reboot checker: %v", err)
|
||||
}
|
||||
|
||||
config, err := rest.InClusterConfig()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
client, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var blockCheckers []blockers.RebootBlocker
|
||||
if prometheusURL != "" {
|
||||
blockCheckers = append(blockCheckers, blockers.NewPrometheusBlockingChecker(papi.Config{Address: prometheusURL}, alertFilter.Regexp, alertFiringOnly, alertFilterMatchOnly))
|
||||
}
|
||||
if podSelectors != nil {
|
||||
blockCheckers = append(blockCheckers, blockers.NewKubernetesBlockingChecker(client, nodeID, podSelectors))
|
||||
}
|
||||
log.Infof("Lock Annotation: %s/%s:%s", dsNamespace, dsName, lockAnnotation)
|
||||
if lockTTL > 0 {
|
||||
log.Infof("Lock TTL set, lock will expire after: %v", lockTTL)
|
||||
} else {
|
||||
log.Info("Lock TTL not set, lock will remain until being released")
|
||||
}
|
||||
if lockReleaseDelay > 0 {
|
||||
log.Infof("Lock release delay set, lock release will be delayed by: %v", lockReleaseDelay)
|
||||
} else {
|
||||
log.Info("Lock release delay not set, lock will be released immediately after rebooting")
|
||||
}
|
||||
lock := daemonsetlock.New(client, nodeID, dsNamespace, dsName, lockAnnotation, lockTTL, concurrency, lockReleaseDelay)
|
||||
|
||||
go rebootAsRequired(nodeID, rebooter, rebootChecker, blockCheckers, window, lock, client)
|
||||
go maintainRebootRequiredMetric(nodeID, rebootChecker)
|
||||
|
||||
http.Handle("/metrics", promhttp.Handler())
|
||||
log.Fatal(http.ListenAndServe(fmt.Sprintf("%s:%d", metricsHost, metricsPort), nil)) // #nosec G114
|
||||
}
|
||||
|
||||
// func that checks for deprecated slack-notification-related flags and node labels that do not match
|
||||
func flagCheck(cmd *cobra.Command, args []string) {
|
||||
if slackHookURL != "" && notifyURL != "" {
|
||||
log.Warnf("Cannot use both --notify-url and --slack-hook-url flags. Kured will use --notify-url flag only...")
|
||||
}
|
||||
if notifyURL != "" {
|
||||
notifyURL = stripQuotes(notifyURL)
|
||||
} else if slackHookURL != "" {
|
||||
slackHookURL = stripQuotes(slackHookURL)
|
||||
log.Warnf("Deprecated flag(s). Please use --notify-url flag instead.")
|
||||
trataURL, err := url.Parse(slackHookURL)
|
||||
if err != nil {
|
||||
log.Warnf("slack-hook-url is not properly formatted... no notification will be sent: %v\n", err)
|
||||
}
|
||||
if len(strings.Split(strings.Trim(trataURL.Path, "/services/"), "/")) != 3 {
|
||||
log.Warnf("slack-hook-url is not properly formatted... no notification will be sent: unexpected number of / in URL\n")
|
||||
} else {
|
||||
notifyURL = fmt.Sprintf("slack://%s", strings.Trim(trataURL.Path, "/services/"))
|
||||
}
|
||||
}
|
||||
func validateNodeLabels(preRebootNodeLabels []string, postRebootNodeLabels []string) error {
|
||||
var preRebootNodeLabelKeys, postRebootNodeLabelKeys []string
|
||||
for _, label := range preRebootNodeLabels {
|
||||
preRebootNodeLabelKeys = append(preRebootNodeLabelKeys, strings.Split(label, "=")[0])
|
||||
@@ -263,8 +303,103 @@ func flagCheck(cmd *cobra.Command, args []string) {
|
||||
sort.Strings(preRebootNodeLabelKeys)
|
||||
sort.Strings(postRebootNodeLabelKeys)
|
||||
if !reflect.DeepEqual(preRebootNodeLabelKeys, postRebootNodeLabelKeys) {
|
||||
log.Warnf("pre-reboot-node-labels keys and post-reboot-node-labels keys do not match. This may result in unexpected behaviour.")
|
||||
return fmt.Errorf("pre-reboot-node-labels keys and post-reboot-node-labels keys do not match, resulting in unexpected behaviour")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateNotificationURL(notifyURL string, slackHookURL string) string {
|
||||
switch {
|
||||
case slackHookURL != "" && notifyURL != "":
|
||||
log.Warnf("Cannot use both --notify-url (given: %v) and --slack-hook-url (given: %v) flags. Kured will only use --notify-url flag", slackHookURL, notifyURL)
|
||||
return validateNotificationURL(notifyURL, "")
|
||||
case notifyURL != "":
|
||||
return stripQuotes(notifyURL)
|
||||
case slackHookURL != "":
|
||||
log.Warnf("Deprecated flag(s). Please use --notify-url flag instead.")
|
||||
parsedURL, err := url.Parse(stripQuotes(slackHookURL))
|
||||
if err != nil {
|
||||
log.Errorf("slack-hook-url is not properly formatted... no notification will be sent: %v\n", err)
|
||||
return ""
|
||||
}
|
||||
if len(strings.Split(strings.ReplaceAll(parsedURL.Path, "/services/", ""), "/")) != 3 {
|
||||
log.Errorf("slack-hook-url is not properly formatted... no notification will be sent: unexpected number of / in URL\n")
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("slack://%s", strings.ReplaceAll(parsedURL.Path, "/services/", ""))
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// LoadFromEnv attempts to load environment variables corresponding to flags.
|
||||
// It looks for an environment variable with the uppercase version of the flag name (prefixed by EnvPrefix).
|
||||
func LoadFromEnv() {
|
||||
flag.VisitAll(func(f *flag.Flag) {
|
||||
envVarName := fmt.Sprintf("%s_%s", EnvPrefix, strings.ToUpper(strings.ReplaceAll(f.Name, "-", "_")))
|
||||
|
||||
if envValue, exists := os.LookupEnv(envVarName); exists {
|
||||
switch f.Value.Type() {
|
||||
case "int":
|
||||
if parsedVal, err := strconv.Atoi(envValue); err == nil {
|
||||
err := flag.Set(f.Name, strconv.Itoa(parsedVal))
|
||||
if err != nil {
|
||||
fmt.Printf("cannot set flag %s from env var named %s", f.Name, envVarName)
|
||||
os.Exit(1)
|
||||
} // Set int flag
|
||||
} else {
|
||||
fmt.Printf("Invalid value for env var named %s", envVarName)
|
||||
os.Exit(1)
|
||||
}
|
||||
case "string":
|
||||
err := flag.Set(f.Name, envValue)
|
||||
if err != nil {
|
||||
fmt.Printf("cannot set flag %s from env{%s}: %s\n", f.Name, envVarName, envValue)
|
||||
os.Exit(1)
|
||||
} // Set string flag
|
||||
case "bool":
|
||||
if parsedVal, err := strconv.ParseBool(envValue); err == nil {
|
||||
err := flag.Set(f.Name, strconv.FormatBool(parsedVal))
|
||||
if err != nil {
|
||||
fmt.Printf("cannot set flag %s from env{%s}: %s\n", f.Name, envVarName, envValue)
|
||||
os.Exit(1)
|
||||
} // Set boolean flag
|
||||
} else {
|
||||
fmt.Printf("Invalid value for %s: %s\n", envVarName, envValue)
|
||||
os.Exit(1)
|
||||
}
|
||||
case "duration":
|
||||
// Set duration from the environment variable (e.g., "1h30m")
|
||||
if _, err := time.ParseDuration(envValue); err == nil {
|
||||
err = flag.Set(f.Name, envValue)
|
||||
if err != nil {
|
||||
fmt.Printf("cannot set flag %s from env{%s}: %s\n", f.Name, envVarName, envValue)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Invalid duration for %s: %s\n", envVarName, envValue)
|
||||
os.Exit(1)
|
||||
}
|
||||
case "regexp":
|
||||
// For regexp, set it from the environment variable
|
||||
err := flag.Set(f.Name, envValue)
|
||||
if err != nil {
|
||||
fmt.Printf("cannot set flag %s from env{%s}: %s\n", f.Name, envVarName, envValue)
|
||||
os.Exit(1)
|
||||
}
|
||||
case "stringSlice":
|
||||
// For stringSlice, split the environment variable by commas and set it
|
||||
err := flag.Set(f.Name, envValue)
|
||||
if err != nil {
|
||||
fmt.Printf("cannot set flag %s from env{%s}: %s\n", f.Name, envVarName, envValue)
|
||||
os.Exit(1)
|
||||
}
|
||||
default:
|
||||
fmt.Printf("Unsupported flag type for %s\n", f.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// stripQuotes removes any literal single or double quote chars that surround a string
|
||||
@@ -280,218 +415,6 @@ func stripQuotes(str string) string {
|
||||
return str
|
||||
}
|
||||
|
||||
// bindViper initializes viper and binds command flags with environment variables
|
||||
func bindViper(cmd *cobra.Command, args []string) error {
|
||||
v := viper.New()
|
||||
|
||||
v.SetEnvPrefix(EnvPrefix)
|
||||
v.AutomaticEnv()
|
||||
bindFlags(cmd, v)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// bindFlags binds each cobra flag to its associated viper configuration (environment variable)
|
||||
func bindFlags(cmd *cobra.Command, v *viper.Viper) {
|
||||
cmd.Flags().VisitAll(func(f *pflag.Flag) {
|
||||
// Environment variables can't have dashes in them, so bind them to their equivalent keys with underscores
|
||||
if strings.Contains(f.Name, "-") {
|
||||
v.BindEnv(f.Name, flagToEnvVar(f.Name))
|
||||
}
|
||||
|
||||
// Apply the viper config value to the flag when the flag is not set and viper has a value
|
||||
if !f.Changed && v.IsSet(f.Name) {
|
||||
val := v.Get(f.Name)
|
||||
log.Infof("Binding %s command flag to environment variable: %s", f.Name, flagToEnvVar(f.Name))
|
||||
cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// flagToEnvVar converts command flag name to equivalent environment variable name
|
||||
func flagToEnvVar(flag string) string {
|
||||
envVarSuffix := strings.ToUpper(strings.ReplaceAll(flag, "-", "_"))
|
||||
return fmt.Sprintf("%s_%s", EnvPrefix, envVarSuffix)
|
||||
}
|
||||
|
||||
// buildHostCommand writes a new command to run in the host namespace
|
||||
// Rancher based need different pid
|
||||
func buildHostCommand(pid int, command []string) []string {
|
||||
|
||||
// From the container, we nsenter into the proper PID to run the hostCommand.
|
||||
// For this, kured daemonset need to be configured with hostPID:true and privileged:true
|
||||
cmd := []string{"/usr/bin/nsenter", fmt.Sprintf("-m/proc/%d/ns/mnt", pid), "--"}
|
||||
cmd = append(cmd, command...)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func rebootRequired(sentinelCommand []string) bool {
|
||||
cmd := util.NewCommand(sentinelCommand[0], sentinelCommand[1:]...)
|
||||
if err := cmd.Run(); err != nil {
|
||||
switch err := err.(type) {
|
||||
case *exec.ExitError:
|
||||
// We assume a non-zero exit code means 'reboot not required', but of course
|
||||
// the user could have misconfigured the sentinel command or something else
|
||||
// went wrong during its execution. In that case, not entering a reboot loop
|
||||
// is the right thing to do, and we are logging stdout/stderr of the command
|
||||
// so it should be obvious what is wrong.
|
||||
if cmd.ProcessState.ExitCode() != 1 {
|
||||
log.Warnf("sentinel command ended with unexpected exit code: %v", cmd.ProcessState.ExitCode())
|
||||
}
|
||||
return false
|
||||
default:
|
||||
// Something was grossly misconfigured, such as the command path being wrong.
|
||||
log.Fatalf("Error invoking sentinel command: %v", err)
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// RebootBlocker interface should be implemented by types
|
||||
// to know if their instantiations should block a reboot
|
||||
type RebootBlocker interface {
|
||||
isBlocked() bool
|
||||
}
|
||||
|
||||
// PrometheusBlockingChecker contains info for connecting
|
||||
// to prometheus, and can give info about whether a reboot should be blocked
|
||||
type PrometheusBlockingChecker struct {
|
||||
// prometheusClient to make prometheus-go-client and api config available
|
||||
// into the PrometheusBlockingChecker struct
|
||||
promClient *alerts.PromClient
|
||||
// regexp used to get alerts
|
||||
filter *regexp.Regexp
|
||||
// bool to indicate if only firing alerts should be considered
|
||||
firingOnly bool
|
||||
// bool to indicate that we're only blocking on alerts which match the filter
|
||||
filterMatchOnly bool
|
||||
}
|
||||
|
||||
// KubernetesBlockingChecker contains info for connecting
|
||||
// to k8s, and can give info about whether a reboot should be blocked
|
||||
type KubernetesBlockingChecker struct {
|
||||
// client used to contact kubernetes API
|
||||
client *kubernetes.Clientset
|
||||
nodename string
|
||||
// lised used to filter pods (podSelector)
|
||||
filter []string
|
||||
}
|
||||
|
||||
func (pb PrometheusBlockingChecker) isBlocked() bool {
|
||||
alertNames, err := pb.promClient.ActiveAlerts(pb.filter, pb.firingOnly, pb.filterMatchOnly)
|
||||
if err != nil {
|
||||
log.Warnf("Reboot blocked: prometheus query error: %v", err)
|
||||
return true
|
||||
}
|
||||
count := len(alertNames)
|
||||
if count > 10 {
|
||||
alertNames = append(alertNames[:10], "...")
|
||||
}
|
||||
if count > 0 {
|
||||
log.Warnf("Reboot blocked: %d active alerts: %v", count, alertNames)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (kb KubernetesBlockingChecker) isBlocked() bool {
|
||||
fieldSelector := fmt.Sprintf("spec.nodeName=%s,status.phase!=Succeeded,status.phase!=Failed,status.phase!=Unknown", kb.nodename)
|
||||
for _, labelSelector := range kb.filter {
|
||||
podList, err := kb.client.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
|
||||
LabelSelector: labelSelector,
|
||||
FieldSelector: fieldSelector,
|
||||
Limit: 10})
|
||||
if err != nil {
|
||||
log.Warnf("Reboot blocked: pod query error: %v", err)
|
||||
return true
|
||||
}
|
||||
|
||||
if len(podList.Items) > 0 {
|
||||
podNames := make([]string, 0, len(podList.Items))
|
||||
for _, pod := range podList.Items {
|
||||
podNames = append(podNames, pod.Name)
|
||||
}
|
||||
if len(podList.Continue) > 0 {
|
||||
podNames = append(podNames, "...")
|
||||
}
|
||||
log.Warnf("Reboot blocked: matching pods: %v", podNames)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func rebootBlocked(blockers ...RebootBlocker) bool {
|
||||
for _, blocker := range blockers {
|
||||
if blocker.isBlocked() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func holding(lock *daemonsetlock.DaemonSetLock, metadata interface{}, isMultiLock bool) bool {
|
||||
var holding bool
|
||||
var err error
|
||||
if isMultiLock {
|
||||
holding, err = lock.TestMultiple()
|
||||
} else {
|
||||
holding, err = lock.Test(metadata)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Error testing lock: %v", err)
|
||||
}
|
||||
if holding {
|
||||
log.Infof("Holding lock")
|
||||
}
|
||||
return holding
|
||||
}
|
||||
|
||||
func acquire(lock *daemonsetlock.DaemonSetLock, metadata interface{}, TTL time.Duration, maxOwners int) bool {
|
||||
var holding bool
|
||||
var holder string
|
||||
var err error
|
||||
if maxOwners > 1 {
|
||||
var holders []string
|
||||
holding, holders, err = lock.AcquireMultiple(metadata, TTL, maxOwners)
|
||||
holder = strings.Join(holders, ",")
|
||||
} else {
|
||||
holding, holder, err = lock.Acquire(metadata, TTL)
|
||||
}
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Fatalf("Error acquiring lock: %v", err)
|
||||
return false
|
||||
case !holding:
|
||||
log.Warnf("Lock already held: %v", holder)
|
||||
return false
|
||||
default:
|
||||
log.Infof("Acquired reboot lock")
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func throttle(releaseDelay time.Duration) {
|
||||
if releaseDelay > 0 {
|
||||
log.Infof("Delaying lock release by %v", releaseDelay)
|
||||
time.Sleep(releaseDelay)
|
||||
}
|
||||
}
|
||||
|
||||
func release(lock *daemonsetlock.DaemonSetLock, isMultiLock bool) {
|
||||
log.Infof("Releasing lock")
|
||||
|
||||
var err error
|
||||
if isMultiLock {
|
||||
err = lock.ReleaseMultiple()
|
||||
} else {
|
||||
err = lock.Release()
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Error releasing lock: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func drain(client *kubernetes.Clientset, node *v1.Node) error {
|
||||
nodename := node.GetName()
|
||||
|
||||
@@ -556,9 +479,9 @@ func uncordon(client *kubernetes.Clientset, node *v1.Node) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func maintainRebootRequiredMetric(nodeID string, sentinelCommand []string) {
|
||||
func maintainRebootRequiredMetric(nodeID string, checker checkers.Checker) {
|
||||
for {
|
||||
if rebootRequired(sentinelCommand) {
|
||||
if checker.RebootRequired() {
|
||||
rebootRequiredGauge.WithLabelValues(nodeID).Set(1)
|
||||
} else {
|
||||
rebootRequiredGauge.WithLabelValues(nodeID).Set(0)
|
||||
@@ -567,11 +490,6 @@ func maintainRebootRequiredMetric(nodeID string, sentinelCommand []string) {
|
||||
}
|
||||
}
|
||||
|
||||
// nodeMeta is used to remember information across reboots
|
||||
type nodeMeta struct {
|
||||
Unschedulable bool `json:"unschedulable"`
|
||||
}
|
||||
|
||||
func addNodeAnnotations(client *kubernetes.Clientset, nodeID string, annotations map[string]string) error {
|
||||
node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeID, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@@ -646,39 +564,32 @@ func updateNodeLabels(client *kubernetes.Clientset, node *v1.Node, labels []stri
|
||||
}
|
||||
}
|
||||
|
||||
func rebootAsRequired(nodeID string, booter reboot.Reboot, sentinelCommand []string, window *timewindow.TimeWindow, TTL time.Duration, releaseDelay time.Duration) {
|
||||
config, err := rest.InClusterConfig()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
func rebootAsRequired(nodeID string, rebooter reboot.Rebooter, checker checkers.Checker, blockCheckers []blockers.RebootBlocker, window *timewindow.TimeWindow, lock daemonsetlock.Lock, client *kubernetes.Clientset) {
|
||||
|
||||
client, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
lock := daemonsetlock.New(client, nodeID, dsNamespace, dsName, lockAnnotation)
|
||||
|
||||
nodeMeta := nodeMeta{}
|
||||
source := rand.NewSource(time.Now().UnixNano())
|
||||
tick := delaytick.New(source, 1*time.Minute)
|
||||
for range tick {
|
||||
if holding(lock, &nodeMeta, concurrency > 1) {
|
||||
holding, lockData, err := lock.Holding()
|
||||
if err != nil {
|
||||
log.Errorf("Error testing lock: %v", err)
|
||||
}
|
||||
if holding {
|
||||
node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeID, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
log.Errorf("Error retrieving node object via k8s API: %v", err)
|
||||
continue
|
||||
}
|
||||
if !nodeMeta.Unschedulable {
|
||||
|
||||
if !lockData.Metadata.Unschedulable {
|
||||
err = uncordon(client, node)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to uncordon %s: %v, will continue to hold lock and retry uncordon", node.GetName(), err)
|
||||
continue
|
||||
} else {
|
||||
if notifyURL != "" {
|
||||
if err := shoutrrr.Send(notifyURL, fmt.Sprintf(messageTemplateUncordon, nodeID)); err != nil {
|
||||
log.Warnf("Error notifying: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if notifyURL != "" {
|
||||
if err := shoutrrr.Send(notifyURL, fmt.Sprintf(messageTemplateUncordon, nodeID)); err != nil {
|
||||
log.Warnf("Error notifying: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -687,7 +598,7 @@ func rebootAsRequired(nodeID string, booter reboot.Reboot, sentinelCommand []str
|
||||
// And (2) check if we previously annotated the node that it was in the process of being rebooted,
|
||||
// And finally (3) if it has that annotation, to delete it.
|
||||
// This indicates to other node tools running on the cluster that this node may be a candidate for maintenance
|
||||
if annotateNodes && !rebootRequired(sentinelCommand) {
|
||||
if annotateNodes && !checker.RebootRequired() {
|
||||
if _, ok := node.Annotations[KuredRebootInProgressAnnotation]; ok {
|
||||
err := deleteNodeAnnotation(client, nodeID, KuredRebootInProgressAnnotation)
|
||||
if err != nil {
|
||||
@@ -695,27 +606,23 @@ func rebootAsRequired(nodeID string, booter reboot.Reboot, sentinelCommand []str
|
||||
}
|
||||
}
|
||||
}
|
||||
throttle(releaseDelay)
|
||||
release(lock, concurrency > 1)
|
||||
break
|
||||
} else {
|
||||
break
|
||||
|
||||
err = lock.Release()
|
||||
if err != nil {
|
||||
log.Errorf("Error releasing lock, will retry: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
preferNoScheduleTaint := taints.New(client, nodeID, preferNoScheduleTaintName, v1.TaintEffectPreferNoSchedule)
|
||||
|
||||
// Remove taint immediately during startup to quickly allow scheduling again.
|
||||
if !rebootRequired(sentinelCommand) {
|
||||
if !checker.RebootRequired() {
|
||||
preferNoScheduleTaint.Disable()
|
||||
}
|
||||
|
||||
// instantiate prometheus client
|
||||
promClient, err := alerts.NewPromClient(papi.Config{Address: prometheusURL})
|
||||
if err != nil {
|
||||
log.Fatal("Unable to create prometheus client: ", err)
|
||||
}
|
||||
|
||||
source = rand.NewSource(time.Now().UnixNano())
|
||||
tick = delaytick.New(source, period)
|
||||
for range tick {
|
||||
@@ -725,7 +632,7 @@ func rebootAsRequired(nodeID string, booter reboot.Reboot, sentinelCommand []str
|
||||
continue
|
||||
}
|
||||
|
||||
if !rebootRequired(sentinelCommand) {
|
||||
if !checker.RebootRequired() {
|
||||
log.Infof("Reboot not required")
|
||||
preferNoScheduleTaint.Disable()
|
||||
continue
|
||||
@@ -735,7 +642,8 @@ func rebootAsRequired(nodeID string, booter reboot.Reboot, sentinelCommand []str
|
||||
if err != nil {
|
||||
log.Fatalf("Error retrieving node object via k8s API: %v", err)
|
||||
}
|
||||
nodeMeta.Unschedulable = node.Spec.Unschedulable
|
||||
|
||||
nodeMeta := daemonsetlock.NodeMeta{Unschedulable: node.Spec.Unschedulable}
|
||||
|
||||
var timeNowString string
|
||||
if annotateNodes {
|
||||
@@ -753,34 +661,44 @@ func rebootAsRequired(nodeID string, booter reboot.Reboot, sentinelCommand []str
|
||||
}
|
||||
}
|
||||
|
||||
var blockCheckers []RebootBlocker
|
||||
if prometheusURL != "" {
|
||||
blockCheckers = append(blockCheckers, PrometheusBlockingChecker{promClient: promClient, filter: alertFilter, firingOnly: alertFiringOnly, filterMatchOnly: alertFilterMatchOnly})
|
||||
}
|
||||
if podSelectors != nil {
|
||||
blockCheckers = append(blockCheckers, KubernetesBlockingChecker{client: client, nodename: nodeID, filter: podSelectors})
|
||||
}
|
||||
|
||||
var rebootRequiredBlockCondition string
|
||||
if rebootBlocked(blockCheckers...) {
|
||||
if blockers.RebootBlocked(blockCheckers...) {
|
||||
rebootRequiredBlockCondition = ", but blocked at this time"
|
||||
continue
|
||||
}
|
||||
log.Infof("Reboot required%s", rebootRequiredBlockCondition)
|
||||
|
||||
if !holding(lock, &nodeMeta, concurrency > 1) && !acquire(lock, &nodeMeta, TTL, concurrency) {
|
||||
// Prefer to not schedule pods onto this node to avoid draing the same pod multiple times.
|
||||
preferNoScheduleTaint.Enable()
|
||||
continue
|
||||
holding, _, err := lock.Holding()
|
||||
if err != nil {
|
||||
log.Errorf("Error testing lock: %v", err)
|
||||
}
|
||||
|
||||
if !holding {
|
||||
acquired, holder, err := lock.Acquire(nodeMeta)
|
||||
if err != nil {
|
||||
log.Errorf("Error acquiring lock: %v", err)
|
||||
}
|
||||
if !acquired {
|
||||
log.Warnf("Lock already held: %v", holder)
|
||||
// Prefer to not schedule pods onto this node to avoid draing the same pod multiple times.
|
||||
preferNoScheduleTaint.Enable()
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
err = drain(client, node)
|
||||
if err != nil {
|
||||
if !forceReboot {
|
||||
log.Errorf("Unable to cordon or drain %s: %v, will release lock and retry cordon and drain before rebooting when lock is next acquired", node.GetName(), err)
|
||||
release(lock, concurrency > 1)
|
||||
err = lock.Release()
|
||||
if err != nil {
|
||||
log.Errorf("Error releasing lock: %v", err)
|
||||
}
|
||||
log.Infof("Performing a best-effort uncordon after failed cordon and drain")
|
||||
uncordon(client, node)
|
||||
err := uncordon(client, node)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to uncordon %s: %v", node.GetName(), err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -795,108 +713,15 @@ func rebootAsRequired(nodeID string, booter reboot.Reboot, sentinelCommand []str
|
||||
log.Warnf("Error notifying: %v", err)
|
||||
}
|
||||
}
|
||||
log.Infof("Triggering reboot for node %v", nodeID)
|
||||
|
||||
booter.Reboot()
|
||||
err = rebooter.Reboot()
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to reboot node: %v", err)
|
||||
}
|
||||
for {
|
||||
log.Infof("Waiting for reboot")
|
||||
time.Sleep(time.Minute)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// buildSentinelCommand creates the shell command line which will need wrapping to escape
|
||||
// the container boundaries
|
||||
func buildSentinelCommand(rebootSentinelFile string, rebootSentinelCommand string) []string {
|
||||
if rebootSentinelCommand != "" {
|
||||
cmd, err := shlex.Split(rebootSentinelCommand)
|
||||
if err != nil {
|
||||
log.Fatalf("Error parsing provided sentinel command: %v", err)
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
return []string{"test", "-f", rebootSentinelFile}
|
||||
}
|
||||
|
||||
// parseRebootCommand creates the shell command line which will need wrapping to escape
|
||||
// the container boundaries
|
||||
func parseRebootCommand(rebootCommand string) []string {
|
||||
command, err := shlex.Split(rebootCommand)
|
||||
if err != nil {
|
||||
log.Fatalf("Error parsing provided reboot command: %v", err)
|
||||
}
|
||||
return command
|
||||
}
|
||||
|
||||
func root(cmd *cobra.Command, args []string) {
|
||||
if logFormat == "json" {
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
}
|
||||
|
||||
log.Infof("Kubernetes Reboot Daemon: %s", version)
|
||||
|
||||
if nodeID == "" {
|
||||
log.Fatal("KURED_NODE_ID environment variable required")
|
||||
}
|
||||
|
||||
window, err := timewindow.New(rebootDays, rebootStart, rebootEnd, timezone)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to build time window: %v", err)
|
||||
}
|
||||
|
||||
sentinelCommand := buildSentinelCommand(rebootSentinelFile, rebootSentinelCommand)
|
||||
restartCommand := parseRebootCommand(rebootCommand)
|
||||
|
||||
log.Infof("Node ID: %s", nodeID)
|
||||
log.Infof("Lock Annotation: %s/%s:%s", dsNamespace, dsName, lockAnnotation)
|
||||
if lockTTL > 0 {
|
||||
log.Infof("Lock TTL set, lock will expire after: %v", lockTTL)
|
||||
} else {
|
||||
log.Info("Lock TTL not set, lock will remain until being released")
|
||||
}
|
||||
if lockReleaseDelay > 0 {
|
||||
log.Infof("Lock release delay set, lock release will be delayed by: %v", lockReleaseDelay)
|
||||
} else {
|
||||
log.Info("Lock release delay not set, lock will be released immediately after rebooting")
|
||||
}
|
||||
log.Infof("PreferNoSchedule taint: %s", preferNoScheduleTaintName)
|
||||
log.Infof("Blocking Pod Selectors: %v", podSelectors)
|
||||
log.Infof("Reboot schedule: %v", window)
|
||||
log.Infof("Reboot check command: %s every %v", sentinelCommand, period)
|
||||
log.Infof("Concurrency: %v", concurrency)
|
||||
log.Infof("Reboot method: %s", rebootMethod)
|
||||
if rebootCommand == MethodCommand {
|
||||
log.Infof("Reboot command: %s", restartCommand)
|
||||
} else {
|
||||
log.Infof("Reboot signal: %v", rebootSignal)
|
||||
}
|
||||
|
||||
if annotateNodes {
|
||||
log.Infof("Will annotate nodes during kured reboot operations")
|
||||
}
|
||||
|
||||
// To run those commands as it was the host, we'll use nsenter
|
||||
// Relies on hostPID:true and privileged:true to enter host mount space
|
||||
// PID set to 1, until we have a better discovery mechanism.
|
||||
hostRestartCommand := buildHostCommand(1, restartCommand)
|
||||
|
||||
// Only wrap sentinel-command with nsenter, if a custom-command was configured, otherwise use the host-path mount
|
||||
hostSentinelCommand := sentinelCommand
|
||||
if rebootSentinelCommand != "" {
|
||||
hostSentinelCommand = buildHostCommand(1, sentinelCommand)
|
||||
}
|
||||
|
||||
var booter reboot.Reboot
|
||||
if rebootMethod == MethodCommand {
|
||||
booter = reboot.NewCommandReboot(nodeID, hostRestartCommand)
|
||||
} else if rebootMethod == MethodSignal {
|
||||
booter = reboot.NewSignalReboot(nodeID, rebootSignal)
|
||||
} else {
|
||||
log.Fatalf("Invalid reboot-method configured: %s", rebootMethod)
|
||||
}
|
||||
|
||||
go rebootAsRequired(nodeID, booter, hostSentinelCommand, window, lockTTL, lockReleaseDelay)
|
||||
go maintainRebootRequiredMetric(nodeID, hostSentinelCommand)
|
||||
|
||||
http.Handle("/metrics", promhttp.Handler())
|
||||
log.Fatal(http.ListenAndServe(fmt.Sprintf("%s:%d", metricsHost, metricsPort), nil))
|
||||
}
|
||||
|
||||
@@ -3,61 +3,29 @@ package main
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/kubereboot/kured/pkg/alerts"
|
||||
assert "gotest.tools/v3/assert"
|
||||
|
||||
papi "github.com/prometheus/client_golang/api"
|
||||
)
|
||||
|
||||
type BlockingChecker struct {
|
||||
blocking bool
|
||||
}
|
||||
func TestValidateNotificationURL(t *testing.T) {
|
||||
|
||||
func (fbc BlockingChecker) isBlocked() bool {
|
||||
return fbc.blocking
|
||||
}
|
||||
|
||||
var _ RebootBlocker = BlockingChecker{} // Verify that Type implements Interface.
|
||||
var _ RebootBlocker = (*BlockingChecker)(nil) // Verify that *Type implements Interface.
|
||||
|
||||
func Test_flagCheck(t *testing.T) {
|
||||
var cmd *cobra.Command
|
||||
var args []string
|
||||
slackHookURL = "https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
|
||||
expected := "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
|
||||
flagCheck(cmd, args)
|
||||
if notifyURL != expected {
|
||||
t.Errorf("Slack URL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
|
||||
tests := []struct {
|
||||
name string
|
||||
slackHookURL string
|
||||
notifyURL string
|
||||
expected string
|
||||
}{
|
||||
{"slackHookURL only works fine", "https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET", "", "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"},
|
||||
{"slackHookURL and notify URL together only keeps notifyURL", "\"https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET\"", "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com", "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"},
|
||||
{"slackHookURL removes extraneous double quotes", "\"https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET\"", "", "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"},
|
||||
{"slackHookURL removes extraneous single quotes", "'https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET'", "", "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"},
|
||||
{"notifyURL removes extraneous double quotes", "", "\"teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com\"", "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"},
|
||||
{"notifyURL removes extraneous single quotes", "", "'teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com'", "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"},
|
||||
}
|
||||
|
||||
// validate that surrounding quotes are stripped
|
||||
slackHookURL = "\"https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET\""
|
||||
expected = "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
|
||||
flagCheck(cmd, args)
|
||||
if notifyURL != expected {
|
||||
t.Errorf("Slack URL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
|
||||
}
|
||||
slackHookURL = "'https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET'"
|
||||
expected = "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
|
||||
flagCheck(cmd, args)
|
||||
if notifyURL != expected {
|
||||
t.Errorf("Slack URL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
|
||||
}
|
||||
slackHookURL = ""
|
||||
notifyURL = "\"teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com\""
|
||||
expected = "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"
|
||||
flagCheck(cmd, args)
|
||||
if notifyURL != expected {
|
||||
t.Errorf("notifyURL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
|
||||
}
|
||||
notifyURL = "'teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com'"
|
||||
expected = "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"
|
||||
flagCheck(cmd, args)
|
||||
if notifyURL != expected {
|
||||
t.Errorf("notifyURL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := validateNotificationURL(tt.notifyURL, tt.slackHookURL); !reflect.DeepEqual(got, tt.expected) {
|
||||
t.Errorf("validateNotificationURL() = %v, expected %v", got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,205 +74,3 @@ func Test_stripQuotes(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_rebootBlocked(t *testing.T) {
|
||||
noCheckers := []RebootBlocker{}
|
||||
nonblockingChecker := BlockingChecker{blocking: false}
|
||||
blockingChecker := BlockingChecker{blocking: true}
|
||||
|
||||
// Instantiate a prometheusClient with a broken_url
|
||||
promClient, err := alerts.NewPromClient(papi.Config{Address: "broken_url"})
|
||||
if err != nil {
|
||||
log.Fatal("Can't create prometheusClient: ", err)
|
||||
}
|
||||
brokenPrometheusClient := PrometheusBlockingChecker{promClient: promClient, filter: nil, firingOnly: false}
|
||||
|
||||
type args struct {
|
||||
blockers []RebootBlocker
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Do not block on no blocker defined",
|
||||
args: args{blockers: noCheckers},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Ensure a blocker blocks",
|
||||
args: args{blockers: []RebootBlocker{blockingChecker}},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Ensure a non-blocker doesn't block",
|
||||
args: args{blockers: []RebootBlocker{nonblockingChecker}},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Ensure one blocker is enough to block",
|
||||
args: args{blockers: []RebootBlocker{nonblockingChecker, blockingChecker}},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Do block on error contacting prometheus API",
|
||||
args: args{blockers: []RebootBlocker{brokenPrometheusClient}},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := rebootBlocked(tt.args.blockers...); got != tt.want {
|
||||
t.Errorf("rebootBlocked() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_buildHostCommand(t *testing.T) {
|
||||
type args struct {
|
||||
pid int
|
||||
command []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "Ensure command will run with nsenter",
|
||||
args: args{pid: 1, command: []string{"ls", "-Fal"}},
|
||||
want: []string{"/usr/bin/nsenter", "-m/proc/1/ns/mnt", "--", "ls", "-Fal"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := buildHostCommand(tt.args.pid, tt.args.command); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("buildHostCommand() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_buildSentinelCommand(t *testing.T) {
|
||||
type args struct {
|
||||
rebootSentinelFile string
|
||||
rebootSentinelCommand string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "Ensure a sentinelFile generates a shell 'test' command with the right file",
|
||||
args: args{
|
||||
rebootSentinelFile: "/test1",
|
||||
rebootSentinelCommand: "",
|
||||
},
|
||||
want: []string{"test", "-f", "/test1"},
|
||||
},
|
||||
{
|
||||
name: "Ensure a sentinelCommand has priority over a sentinelFile if both are provided (because sentinelFile is always provided)",
|
||||
args: args{
|
||||
rebootSentinelFile: "/test1",
|
||||
rebootSentinelCommand: "/sbin/reboot-required -r",
|
||||
},
|
||||
want: []string{"/sbin/reboot-required", "-r"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := buildSentinelCommand(tt.args.rebootSentinelFile, tt.args.rebootSentinelCommand); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("buildSentinelCommand() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseRebootCommand(t *testing.T) {
|
||||
type args struct {
|
||||
rebootCommand string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "Ensure a reboot command is properly parsed",
|
||||
args: args{
|
||||
rebootCommand: "/sbin/systemctl reboot",
|
||||
},
|
||||
want: []string{"/sbin/systemctl", "reboot"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := parseRebootCommand(tt.args.rebootCommand); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("parseRebootCommand() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_rebootRequired(t *testing.T) {
|
||||
type args struct {
|
||||
sentinelCommand []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Ensure rc = 0 means reboot required",
|
||||
args: args{
|
||||
sentinelCommand: []string{"true"},
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Ensure rc != 0 means reboot NOT required",
|
||||
args: args{
|
||||
sentinelCommand: []string{"false"},
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := rebootRequired(tt.args.sentinelCommand); got != tt.want {
|
||||
t.Errorf("rebootRequired() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_rebootRequired_fatals(t *testing.T) {
|
||||
cases := []struct {
|
||||
param []string
|
||||
expectFatal bool
|
||||
}{
|
||||
{
|
||||
param: []string{"true"},
|
||||
expectFatal: false,
|
||||
},
|
||||
{
|
||||
param: []string{"./babar"},
|
||||
expectFatal: true,
|
||||
},
|
||||
}
|
||||
|
||||
defer func() { log.StandardLogger().ExitFunc = nil }()
|
||||
var fatal bool
|
||||
log.StandardLogger().ExitFunc = func(int) { fatal = true }
|
||||
|
||||
for _, c := range cases {
|
||||
fatal = false
|
||||
rebootRequired(c.param)
|
||||
assert.Equal(t, c.expectFatal, fatal)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -5,14 +5,14 @@ import (
|
||||
)
|
||||
|
||||
type regexpValue struct {
|
||||
value **regexp.Regexp
|
||||
*regexp.Regexp
|
||||
}
|
||||
|
||||
func (rev *regexpValue) String() string {
|
||||
if *rev.value == nil {
|
||||
if rev.Regexp == nil {
|
||||
return ""
|
||||
}
|
||||
return (*rev.value).String()
|
||||
return rev.Regexp.String()
|
||||
}
|
||||
|
||||
func (rev *regexpValue) Set(s string) error {
|
||||
@@ -20,12 +20,11 @@ func (rev *regexpValue) Set(s string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*rev.value = value
|
||||
|
||||
rev.Regexp = value
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type method returns the type of the flag as a string
|
||||
func (rev *regexpValue) Type() string {
|
||||
return "regexp.Regexp"
|
||||
return "regexp"
|
||||
}
|
||||
|
||||
125
go.mod
125
go.mod
@@ -1,109 +1,90 @@
|
||||
module github.com/kubereboot/kured
|
||||
|
||||
go 1.20
|
||||
|
||||
replace golang.org/x/net => golang.org/x/net v0.17.0
|
||||
|
||||
replace github.com/emicklei/go-restful/v3 => github.com/emicklei/go-restful/v3 v3.10.2
|
||||
go 1.24.11
|
||||
|
||||
require (
|
||||
github.com/containrrr/shoutrrr v0.8.0
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
||||
github.com/google/uuid v1.4.0 // indirect
|
||||
github.com/prometheus/client_golang v1.18.0
|
||||
github.com/prometheus/common v0.46.0
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/prometheus/common v0.67.5
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.18.2
|
||||
github.com/stretchr/testify v1.8.4
|
||||
gotest.tools/v3 v3.5.1
|
||||
k8s.io/api v0.28.4
|
||||
k8s.io/apimachinery v0.28.4
|
||||
k8s.io/client-go v0.28.4
|
||||
k8s.io/kubectl v0.28.4
|
||||
github.com/spf13/pflag v1.0.10
|
||||
github.com/stretchr/testify v1.11.1
|
||||
k8s.io/api v0.34.3
|
||||
k8s.io/apimachinery v0.34.3
|
||||
k8s.io/client-go v0.34.3
|
||||
k8s.io/kubectl v0.34.3
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.15.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-logr/logr v1.2.4 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/imdario/mergo v0.3.6 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/cast v1.6.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/spf13/cobra v1.9.1 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
|
||||
golang.org/x/net v0.20.0 // indirect
|
||||
golang.org/x/oauth2 v0.16.0 // indirect
|
||||
golang.org/x/sync v0.5.0 // indirect
|
||||
golang.org/x/sys v0.16.0 // indirect
|
||||
golang.org/x/term v0.13.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/protobuf v1.32.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/net v0.48.0 // indirect
|
||||
golang.org/x/oauth2 v0.34.0 // indirect
|
||||
golang.org/x/sync v0.19.0 // indirect
|
||||
golang.org/x/sys v0.39.0 // indirect
|
||||
golang.org/x/term v0.38.0 // indirect
|
||||
golang.org/x/text v0.32.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
google.golang.org/protobuf v1.36.11 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/cli-runtime v0.28.4 // indirect
|
||||
k8s.io/component-base v0.28.4 // indirect
|
||||
k8s.io/klog/v2 v2.100.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
k8s.io/cli-runtime v0.34.3 // indirect
|
||||
k8s.io/component-base v0.34.3 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.20.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
)
|
||||
|
||||
369
go.sum
369
go.sum
@@ -1,119 +1,96 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
|
||||
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
|
||||
github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/containrrr/shoutrrr v0.8.0 h1:mfG2ATzIS7NR2Ec6XL+xyoHzN97H8WPjir8aYzJUSec=
|
||||
github.com/containrrr/shoutrrr v0.8.0/go.mod h1:ioyQAyu1LJY6sILuNyKaQaw+9Ttik5QePU8atnAdO2o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE=
|
||||
github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
|
||||
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
|
||||
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
|
||||
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc=
|
||||
github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
@@ -123,26 +100,28 @@ github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPn
|
||||
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
|
||||
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
|
||||
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
|
||||
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
|
||||
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
@@ -150,191 +129,135 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
|
||||
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
|
||||
github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
|
||||
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
|
||||
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
|
||||
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
|
||||
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
|
||||
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
|
||||
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
|
||||
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
|
||||
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
||||
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
|
||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
|
||||
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
|
||||
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
|
||||
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY=
|
||||
k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0=
|
||||
k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8=
|
||||
k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg=
|
||||
k8s.io/cli-runtime v0.28.4 h1:IW3aqSNFXiGDllJF4KVYM90YX4cXPGxuCxCVqCD8X+Q=
|
||||
k8s.io/cli-runtime v0.28.4/go.mod h1:MLGRB7LWTIYyYR3d/DOgtUC8ihsAPA3P8K8FDNIqJ0k=
|
||||
k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY=
|
||||
k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4=
|
||||
k8s.io/component-base v0.28.4 h1:c/iQLWPdUgI90O+T9TeECg8o7N3YJTiuz2sKxILYcYo=
|
||||
k8s.io/component-base v0.28.4/go.mod h1:m9hR0uvqXDybiGL2nf/3Lf0MerAfQXzkfWhUY58JUbU=
|
||||
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
|
||||
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
|
||||
k8s.io/kubectl v0.28.4 h1:gWpUXW/T7aFne+rchYeHkyB8eVDl5UZce8G4X//kjUQ=
|
||||
k8s.io/kubectl v0.28.4/go.mod h1:CKOccVx3l+3MmDbkXtIUtibq93nN2hkDR99XDCn7c/c=
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY=
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U=
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4=
|
||||
k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk=
|
||||
k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE=
|
||||
k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
|
||||
k8s.io/cli-runtime v0.34.3 h1:YRyMhiwX0dT9lmG0AtZDaeG33Nkxgt9OlCTZhRXj9SI=
|
||||
k8s.io/cli-runtime v0.34.3/go.mod h1:GVwL1L5uaGEgM7eGeKjaTG2j3u134JgG4dAI6jQKhMc=
|
||||
k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A=
|
||||
k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM=
|
||||
k8s.io/component-base v0.34.3 h1:zsEgw6ELqK0XncCQomgO9DpUIzlrYuZYA0Cgo+JWpVk=
|
||||
k8s.io/component-base v0.34.3/go.mod h1:5iIlD8wPfWE/xSHTRfbjuvUul2WZbI2nOUK65XL0E/c=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/kubectl v0.34.3 h1:vpM6//153gh5gvsYHXWHVJ4l4xmN5QFwTSmlfd8icm8=
|
||||
k8s.io/kubectl v0.34.3/go.mod h1:zZQHtIZoUqTP1bAnPzq/3W1jfc0NeOeunFgcswrfg1c=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I=
|
||||
sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM=
|
||||
sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78=
|
||||
sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
|
||||
41
internal/validators.go
Normal file
41
internal/validators.go
Normal file
@@ -0,0 +1,41 @@
|
||||
// Package internal provides convenient tools which shouldn't be in cmd/main
|
||||
// It will eventually provide internal validation and chaining logic to select
|
||||
// appropriate reboot and sentinel check methods based on configuration.
|
||||
// It validates user input and instantiates the correct checker and rebooter implementations
|
||||
// for use elsewhere in kured.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/kubereboot/kured/pkg/checkers"
|
||||
"github.com/kubereboot/kured/pkg/reboot"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NewRebooter validates the rebootMethod, rebootCommand, and rebootSignal input,
|
||||
// then chains to the right constructor.
|
||||
func NewRebooter(rebootMethod string, rebootCommand string, rebootSignal int) (reboot.Rebooter, error) {
|
||||
switch rebootMethod {
|
||||
case "command":
|
||||
log.Infof("Reboot command: %s", rebootCommand)
|
||||
return reboot.NewCommandRebooter(rebootCommand)
|
||||
case "signal":
|
||||
log.Infof("Reboot signal: %d", rebootSignal)
|
||||
return reboot.NewSignalRebooter(rebootSignal)
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid reboot-method configured %s, expected signal or command", rebootMethod)
|
||||
}
|
||||
}
|
||||
|
||||
// NewRebootChecker validates the rebootSentinelCommand, rebootSentinelFile input,
|
||||
// then chains to the right constructor.
|
||||
func NewRebootChecker(rebootSentinelCommand string, rebootSentinelFile string) (checkers.Checker, error) {
|
||||
// An override of rebootSentinelCommand means a privileged command
|
||||
if rebootSentinelCommand != "" {
|
||||
log.Infof("Sentinel checker is (privileged) user provided command: %s", rebootSentinelCommand)
|
||||
return checkers.NewCommandChecker(rebootSentinelCommand, 1, true)
|
||||
}
|
||||
log.Infof("Sentinel checker is (unprivileged) testing for the presence of: %s", rebootSentinelFile)
|
||||
return checkers.NewFileRebootChecker(rebootSentinelFile)
|
||||
}
|
||||
@@ -38,7 +38,7 @@ spec:
|
||||
- name: kured
|
||||
# If you find yourself here wondering why there is no
|
||||
# :latest tag on Docker Hub,see the FAQ in the README
|
||||
image: ghcr.io/kubereboot/kured:1.15.0
|
||||
image: ghcr.io/kubereboot/kured:1.21.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: false # Give permission to nsenter /proc/1/ns/mnt
|
||||
|
||||
@@ -38,7 +38,7 @@ spec:
|
||||
- name: kured
|
||||
# If you find yourself here wondering why there is no
|
||||
# :latest tag on Docker Hub,see the FAQ in the README
|
||||
image: ghcr.io/kubereboot/kured:1.15.0
|
||||
image: ghcr.io/kubereboot/kured:1.21.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true # Give permission to nsenter /proc/1/ns/mnt
|
||||
|
||||
@@ -1,77 +0,0 @@
|
||||
package alerts
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
papi "github.com/prometheus/client_golang/api"
|
||||
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// PromClient is a wrapper around the Prometheus Client interface and implements the api
|
||||
// This way, the PromClient can be instantiated with the configuration the Client needs, and
|
||||
// the ability to use the methods the api has, like Query and so on.
|
||||
type PromClient struct {
|
||||
papi papi.Client
|
||||
api v1.API
|
||||
}
|
||||
|
||||
// NewPromClient creates a new client to the Prometheus API.
|
||||
// It returns an error on any problem.
|
||||
func NewPromClient(conf papi.Config) (*PromClient, error) {
|
||||
promClient, err := papi.NewClient(conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := PromClient{papi: promClient, api: v1.NewAPI(promClient)}
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// ActiveAlerts is a method of type PromClient, it returns a list of names of active alerts
|
||||
// (e.g. pending or firing), filtered by the supplied regexp or by the includeLabels query.
|
||||
// filter by regexp means when the regex finds the alert-name; the alert is exluded from the
|
||||
// block-list and will NOT block rebooting. query by includeLabel means,
|
||||
// if the query finds an alert, it will include it to the block-list and it WILL block rebooting.
|
||||
func (p *PromClient) ActiveAlerts(filter *regexp.Regexp, firingOnly, filterMatchOnly bool) ([]string, error) {
|
||||
|
||||
// get all alerts from prometheus
|
||||
value, _, err := p.api.Query(context.Background(), "ALERTS", time.Now())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if value.Type() == model.ValVector {
|
||||
if vector, ok := value.(model.Vector); ok {
|
||||
activeAlertSet := make(map[string]bool)
|
||||
for _, sample := range vector {
|
||||
if alertName, isAlert := sample.Metric[model.AlertNameLabel]; isAlert && sample.Value != 0 {
|
||||
if matchesRegex(filter, string(alertName), filterMatchOnly) && (!firingOnly || sample.Metric["alertstate"] == "firing") {
|
||||
activeAlertSet[string(alertName)] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var activeAlerts []string
|
||||
for activeAlert := range activeAlertSet {
|
||||
activeAlerts = append(activeAlerts, activeAlert)
|
||||
}
|
||||
sort.Strings(activeAlerts)
|
||||
|
||||
return activeAlerts, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Unexpected value type: %v", value)
|
||||
}
|
||||
|
||||
func matchesRegex(filter *regexp.Regexp, alertName string, filterMatchOnly bool) bool {
|
||||
if filter == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
return filter.MatchString(string(alertName)) == filterMatchOnly
|
||||
}
|
||||
21
pkg/blockers/blockers.go
Normal file
21
pkg/blockers/blockers.go
Normal file
@@ -0,0 +1,21 @@
|
||||
// Package blockers provides interfaces and implementations for determining
|
||||
// whether a system should be prevented to reboot.
|
||||
// You can use that package if you fork Kured's main loop.
|
||||
package blockers
|
||||
|
||||
// RebootBlocked checks that a single block Checker
|
||||
// will block the reboot or not.
|
||||
func RebootBlocked(blockers ...RebootBlocker) bool {
|
||||
for _, blocker := range blockers {
|
||||
if blocker.IsBlocked() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RebootBlocker interface should be implemented by types
|
||||
// to know if their instantiations should block a reboot
|
||||
type RebootBlocker interface {
|
||||
IsBlocked() bool
|
||||
}
|
||||
65
pkg/blockers/blockers_test.go
Normal file
65
pkg/blockers/blockers_test.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package blockers
|
||||
|
||||
import (
|
||||
papi "github.com/prometheus/client_golang/api"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type BlockingChecker struct {
|
||||
blocking bool
|
||||
}
|
||||
|
||||
func (fbc BlockingChecker) IsBlocked() bool {
|
||||
return fbc.blocking
|
||||
}
|
||||
|
||||
func Test_rebootBlocked(t *testing.T) {
|
||||
noCheckers := []RebootBlocker{}
|
||||
nonblockingChecker := BlockingChecker{blocking: false}
|
||||
blockingChecker := BlockingChecker{blocking: true}
|
||||
|
||||
// Instantiate a prometheusClient with a broken_url
|
||||
brokenPrometheusClient := NewPrometheusBlockingChecker(papi.Config{Address: "broken_url"}, nil, false, false)
|
||||
|
||||
type args struct {
|
||||
blockers []RebootBlocker
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Do not block on no blocker defined",
|
||||
args: args{blockers: noCheckers},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Ensure a blocker blocks",
|
||||
args: args{blockers: []RebootBlocker{blockingChecker}},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Ensure a non-blocker doesn't block",
|
||||
args: args{blockers: []RebootBlocker{nonblockingChecker}},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Ensure one blocker is enough to block",
|
||||
args: args{blockers: []RebootBlocker{nonblockingChecker, blockingChecker}},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Do block on error contacting prometheus API",
|
||||
args: args{blockers: []RebootBlocker{brokenPrometheusClient}},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := RebootBlocked(tt.args.blockers...); got != tt.want {
|
||||
t.Errorf("rebootBlocked() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
64
pkg/blockers/kubernetespod.go
Normal file
64
pkg/blockers/kubernetespod.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package blockers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
// Compile-time checks to ensure the type implements the interface
|
||||
var (
|
||||
_ RebootBlocker = (*KubernetesBlockingChecker)(nil)
|
||||
)
|
||||
|
||||
// KubernetesBlockingChecker contains info for connecting
|
||||
// to k8s, and can give info about whether a reboot should be blocked
|
||||
type KubernetesBlockingChecker struct {
|
||||
// client used to contact kubernetes API
|
||||
client *kubernetes.Clientset
|
||||
nodeName string
|
||||
// lised used to filter pods (podSelector)
|
||||
filter []string
|
||||
}
|
||||
|
||||
// NewKubernetesBlockingChecker creates a new KubernetesBlockingChecker using the provided Kubernetes client,
|
||||
// node name, and pod selectors.
|
||||
func NewKubernetesBlockingChecker(client *kubernetes.Clientset, nodename string, podSelectors []string) *KubernetesBlockingChecker {
|
||||
return &KubernetesBlockingChecker{
|
||||
client: client,
|
||||
nodeName: nodename,
|
||||
filter: podSelectors,
|
||||
}
|
||||
}
|
||||
|
||||
// IsBlocked for the KubernetesBlockingChecker will check if a pod, for the node, is preventing
|
||||
// the reboot. It will warn in the logs about blocking, but does not return an error.
|
||||
func (kb KubernetesBlockingChecker) IsBlocked() bool {
|
||||
fieldSelector := fmt.Sprintf("spec.nodeName=%s,status.phase!=Succeeded,status.phase!=Failed,status.phase!=Unknown", kb.nodeName)
|
||||
for _, labelSelector := range kb.filter {
|
||||
podList, err := kb.client.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
|
||||
LabelSelector: labelSelector,
|
||||
FieldSelector: fieldSelector,
|
||||
Limit: 10})
|
||||
if err != nil {
|
||||
log.Warnf("Reboot blocked: pod query error: %v", err)
|
||||
return true
|
||||
}
|
||||
|
||||
if len(podList.Items) > 0 {
|
||||
podNames := make([]string, 0, len(podList.Items))
|
||||
for _, pod := range podList.Items {
|
||||
podNames = append(podNames, pod.Name)
|
||||
}
|
||||
if len(podList.Continue) > 0 {
|
||||
podNames = append(podNames, "...")
|
||||
}
|
||||
log.Warnf("Reboot blocked: matching pods: %v", podNames)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
121
pkg/blockers/prometheus.go
Normal file
121
pkg/blockers/prometheus.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package blockers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
papi "github.com/prometheus/client_golang/api"
|
||||
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Compile-time checks to ensure the type implements the interface
|
||||
var (
|
||||
_ RebootBlocker = (*PrometheusBlockingChecker)(nil)
|
||||
)
|
||||
|
||||
// PrometheusBlockingChecker contains info for connecting
|
||||
// to prometheus, and can give info about whether a reboot should be blocked
|
||||
type PrometheusBlockingChecker struct {
|
||||
promConfig papi.Config
|
||||
// regexp used to get alerts
|
||||
filter *regexp.Regexp
|
||||
// bool to indicate if only firing alerts should be considered
|
||||
firingOnly bool
|
||||
// bool to indicate that we're only blocking on alerts which match the filter
|
||||
filterMatchOnly bool
|
||||
// storing the promClient
|
||||
promClient papi.Client
|
||||
}
|
||||
|
||||
// NewPrometheusBlockingChecker creates a new PrometheusBlockingChecker using the given
|
||||
// Prometheus API config, alert filter, and filtering options.
|
||||
func NewPrometheusBlockingChecker(config papi.Config, alertFilter *regexp.Regexp, firingOnly bool, filterMatchOnly bool) PrometheusBlockingChecker {
|
||||
promClient, _ := papi.NewClient(config)
|
||||
|
||||
return PrometheusBlockingChecker{
|
||||
promConfig: config,
|
||||
filter: alertFilter,
|
||||
firingOnly: firingOnly,
|
||||
filterMatchOnly: filterMatchOnly,
|
||||
promClient: promClient,
|
||||
}
|
||||
}
|
||||
|
||||
// IsBlocked for the prometheus will check if there are active alerts matching
|
||||
// the arguments given into the PrometheusBlockingChecker which would actively
|
||||
// block the reboot.
|
||||
// As of today, no blocker information is shared as a return of the method,
|
||||
// and the information is simply logged.
|
||||
func (pb PrometheusBlockingChecker) IsBlocked() bool {
|
||||
alertNames, err := pb.ActiveAlerts()
|
||||
if err != nil {
|
||||
log.Warnf("Reboot blocked: prometheus query error: %v", err)
|
||||
return true
|
||||
}
|
||||
count := len(alertNames)
|
||||
if count > 10 {
|
||||
alertNames = append(alertNames[:10], "...")
|
||||
}
|
||||
if count > 0 {
|
||||
log.Warnf("Reboot blocked: %d active alerts: %v", count, alertNames)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// MetricLabel is used to give a fancier name
|
||||
// than the type to the label for rebootBlockedCounter
|
||||
func (pb PrometheusBlockingChecker) MetricLabel() string {
|
||||
return "prometheus"
|
||||
}
|
||||
|
||||
// ActiveAlerts is a method of type promClient, it returns a list of names of active alerts
|
||||
// (e.g. pending or firing), filtered by the supplied regexp or by the includeLabels query.
|
||||
// filter by regexp means when the regexp finds the alert-name; the alert is excluded from the
|
||||
// block-list and will NOT block rebooting. query by includeLabel means,
|
||||
// if the query finds an alert, it will include it to the block-list, and it WILL block rebooting.
|
||||
func (pb PrometheusBlockingChecker) ActiveAlerts() ([]string, error) {
|
||||
api := v1.NewAPI(pb.promClient)
|
||||
|
||||
// get all alerts from prometheus
|
||||
value, _, err := api.Query(context.Background(), "ALERTS", time.Now())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if value.Type() == model.ValVector {
|
||||
if vector, ok := value.(model.Vector); ok {
|
||||
activeAlertSet := make(map[string]bool)
|
||||
for _, sample := range vector {
|
||||
if alertName, isAlert := sample.Metric[model.AlertNameLabel]; isAlert && sample.Value != 0 {
|
||||
if matchesRegex(pb.filter, string(alertName), pb.filterMatchOnly) && (!pb.firingOnly || sample.Metric["alertstate"] == "firing") {
|
||||
activeAlertSet[string(alertName)] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var activeAlerts []string
|
||||
for activeAlert := range activeAlertSet {
|
||||
activeAlerts = append(activeAlerts, activeAlert)
|
||||
}
|
||||
sort.Strings(activeAlerts)
|
||||
|
||||
return activeAlerts, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unexpected value type %v", value)
|
||||
}
|
||||
|
||||
func matchesRegex(filter *regexp.Regexp, alertName string, filterMatchOnly bool) bool {
|
||||
if filter == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
return filter.MatchString(alertName) == filterMatchOnly
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package alerts
|
||||
package blockers
|
||||
|
||||
import (
|
||||
"log"
|
||||
@@ -145,12 +145,9 @@ func TestActiveAlerts(t *testing.T) {
|
||||
regex, _ := regexp.Compile(tc.rFilter)
|
||||
|
||||
// instantiate the prometheus client with the mockserver-address
|
||||
p, err := NewPromClient(api.Config{Address: mockServer.URL})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
p := NewPrometheusBlockingChecker(api.Config{Address: mockServer.URL}, regex, tc.firingOnly, tc.filterMatchOnly)
|
||||
|
||||
result, err := p.ActiveAlerts(regex, tc.firingOnly, tc.filterMatchOnly)
|
||||
result, err := p.ActiveAlerts()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
117
pkg/checkers/checker.go
Normal file
117
pkg/checkers/checker.go
Normal file
@@ -0,0 +1,117 @@
|
||||
// Package checkers provides interfaces and implementations for determining
|
||||
// whether a system reboot is required. It includes checkers based on file
|
||||
// presence or custom commands, and supports privileged command execution
|
||||
// in containerized environments. These checkers are used by kured to
|
||||
// detect conditions that should trigger node reboots.
|
||||
// You can use that package if you fork Kured's main loop.
|
||||
package checkers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/google/shlex"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Checker is the standard interface to use to check
|
||||
// if a reboot is required. Its types must implement a
|
||||
// CheckRebootRequired method which returns a single boolean
|
||||
// clarifying whether a reboot is expected or not.
|
||||
type Checker interface {
|
||||
RebootRequired() bool
|
||||
}
|
||||
|
||||
// FileRebootChecker is the default reboot checker.
|
||||
// It is unprivileged, and tests the presence of a files
|
||||
type FileRebootChecker struct {
|
||||
FilePath string
|
||||
}
|
||||
|
||||
// RebootRequired checks the file presence
|
||||
// needs refactoring to also return an error, instead of leaking it inside the code.
|
||||
// This needs refactoring to get rid of NewCommand
|
||||
// This needs refactoring to only contain file location, instead of CheckCommand
|
||||
func (rc FileRebootChecker) RebootRequired() bool {
|
||||
if _, err := os.Stat(rc.FilePath); err == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NewFileRebootChecker is the constructor for the file based reboot checker
|
||||
// TODO: Add extra input validation on filePath string here
|
||||
func NewFileRebootChecker(filePath string) (*FileRebootChecker, error) {
|
||||
return &FileRebootChecker{
|
||||
FilePath: filePath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CommandChecker is using a custom command to check
|
||||
// if a reboot is required. There are two modes of behaviour,
|
||||
// if Privileged is granted, the NamespacePid is used to nsenter
|
||||
// the given PID's namespace.
|
||||
type CommandChecker struct {
|
||||
CheckCommand []string
|
||||
NamespacePid int
|
||||
Privileged bool
|
||||
}
|
||||
|
||||
// RebootRequired for CommandChecker runs a command without returning
|
||||
// any eventual error. This should be later refactored to return the errors,
|
||||
// instead of logging and fataling them here.
|
||||
func (rc CommandChecker) RebootRequired() bool {
|
||||
bufStdout := new(bytes.Buffer)
|
||||
bufStderr := new(bytes.Buffer)
|
||||
// #nosec G204 -- CheckCommand is controlled and validated internally
|
||||
cmd := exec.Command(rc.CheckCommand[0], rc.CheckCommand[1:]...)
|
||||
cmd.Stdout = bufStdout
|
||||
cmd.Stderr = bufStderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
switch err := err.(type) {
|
||||
case *exec.ExitError:
|
||||
// We assume a non-zero exit code means 'reboot not required', but of course
|
||||
// the user could have misconfigured the sentinel command or something else
|
||||
// went wrong during its execution. In that case, not entering a reboot loop
|
||||
// is the right thing to do, and we are logging stdout/stderr of the command
|
||||
// so it should be obvious what is wrong.
|
||||
if cmd.ProcessState.ExitCode() != 1 {
|
||||
log.Warn(fmt.Sprintf("sentinel command ended with unexpected exit code: %v", cmd.ProcessState.ExitCode()), "cmd", strings.Join(cmd.Args, " "), "stdout", bufStdout.String(), "stderr", bufStderr.String())
|
||||
}
|
||||
return false
|
||||
default:
|
||||
// Something was grossly misconfigured, such as the command path being wrong.
|
||||
log.Fatal(fmt.Sprintf("Error invoking sentinel command: %v", err), "cmd", strings.Join(cmd.Args, " "), "stdout", bufStdout.String(), "stderr", bufStderr.String())
|
||||
}
|
||||
}
|
||||
log.Info("checking if reboot is required", "cmd", strings.Join(cmd.Args, " "), "stdout", bufStdout.String(), "stderr", bufStderr.String())
|
||||
return true
|
||||
}
|
||||
|
||||
// NewCommandChecker is the constructor for the commandChecker, and by default
|
||||
// runs new commands in a privileged fashion.
|
||||
// Privileged means wrapping the command with nsenter.
|
||||
// It allows to run a command from systemd's namespace for example (pid 1)
|
||||
// This relies on hostPID:true and privileged:true to enter host mount space
|
||||
// For info, rancher based need different pid, which should be user given.
|
||||
// until we have a better discovery mechanism.
|
||||
func NewCommandChecker(sentinelCommand string, pid int, privileged bool) (*CommandChecker, error) {
|
||||
var cmd []string
|
||||
if privileged {
|
||||
cmd = append(cmd, "/usr/bin/nsenter", fmt.Sprintf("-m/proc/%d/ns/mnt", pid), "--")
|
||||
}
|
||||
parsedCommand, err := shlex.Split(sentinelCommand)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing provided sentinel command: %v", err)
|
||||
}
|
||||
cmd = append(cmd, parsedCommand...)
|
||||
return &CommandChecker{
|
||||
CheckCommand: cmd,
|
||||
NamespacePid: pid,
|
||||
Privileged: privileged,
|
||||
}, nil
|
||||
}
|
||||
87
pkg/checkers/checker_test.go
Normal file
87
pkg/checkers/checker_test.go
Normal file
@@ -0,0 +1,87 @@
|
||||
package checkers
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_nsEntering(t *testing.T) {
|
||||
type args struct {
|
||||
pid int
|
||||
command string
|
||||
privileged bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "Ensure command will run with nsenter",
|
||||
args: args{pid: 1, command: "ls -Fal", privileged: true},
|
||||
want: []string{"/usr/bin/nsenter", "-m/proc/1/ns/mnt", "--", "ls", "-Fal"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cc, _ := NewCommandChecker(tt.args.command, tt.args.pid, tt.args.privileged)
|
||||
if !reflect.DeepEqual(cc.CheckCommand, tt.want) {
|
||||
t.Errorf("command parsed as %v, want %v", cc.CheckCommand, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_rebootRequired(t *testing.T) {
|
||||
type args struct {
|
||||
sentinelCommand []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
fatals bool
|
||||
}{
|
||||
{
|
||||
name: "Ensure rc = 0 means reboot required",
|
||||
args: args{
|
||||
sentinelCommand: []string{"true"},
|
||||
},
|
||||
want: true,
|
||||
fatals: false,
|
||||
},
|
||||
{
|
||||
name: "Ensure rc != 0 means reboot NOT required",
|
||||
args: args{
|
||||
sentinelCommand: []string{"false"},
|
||||
},
|
||||
want: false,
|
||||
fatals: false,
|
||||
},
|
||||
{
|
||||
name: "Ensure a wrong command fatals",
|
||||
args: args{
|
||||
sentinelCommand: []string{"./babar"},
|
||||
},
|
||||
want: true,
|
||||
fatals: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
defer func() { log.StandardLogger().ExitFunc = nil }()
|
||||
fatal := false
|
||||
log.StandardLogger().ExitFunc = func(int) { fatal = true }
|
||||
|
||||
a := CommandChecker{CheckCommand: tt.args.sentinelCommand, NamespacePid: 1, Privileged: false}
|
||||
|
||||
if got := a.RebootRequired(); got != tt.want {
|
||||
t.Errorf("rebootRequired() = %v, want %v", got, tt.want)
|
||||
}
|
||||
if tt.fatals != fatal {
|
||||
t.Errorf("fatal flag is %v, want fatal %v", fatal, tt.fatals)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,11 +1,18 @@
|
||||
// Package daemonsetlock provides mechanisms for leader election and locking
|
||||
// using Kubernetes DaemonSets. It enables distributed coordination of operations
|
||||
// (such as reboots) by ensuring only one node acts as the leader at any time,
|
||||
// leveraging Kubernetes primitives for safe, atomic locking in clusters.
|
||||
package daemonsetlock
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -18,6 +25,25 @@ const (
|
||||
k8sAPICallRetryTimeout = 5 * time.Minute // How long to wait until we determine that the k8s API is definitively unavailable
|
||||
)
|
||||
|
||||
// Lock defines the interface for acquiring, releasing, and checking
|
||||
// the status of a reboot coordination lock.
|
||||
type Lock interface {
|
||||
Acquire(NodeMeta) (bool, string, error)
|
||||
Release() error
|
||||
Holding() (bool, LockAnnotationValue, error)
|
||||
}
|
||||
|
||||
// GenericLock holds the configuration for lock TTL and the delay before releasing it.
|
||||
type GenericLock struct {
|
||||
TTL time.Duration
|
||||
releaseDelay time.Duration
|
||||
}
|
||||
|
||||
// NodeMeta contains metadata about a node relevant to scheduling decisions.
|
||||
type NodeMeta struct {
|
||||
Unschedulable bool `json:"unschedulable"`
|
||||
}
|
||||
|
||||
// DaemonSetLock holds all necessary information to do actions
|
||||
// on the kured ds which holds lock info through annotations.
|
||||
type DaemonSetLock struct {
|
||||
@@ -28,34 +54,98 @@ type DaemonSetLock struct {
|
||||
annotation string
|
||||
}
|
||||
|
||||
type lockAnnotationValue struct {
|
||||
// DaemonSetSingleLock holds all necessary information to do actions
|
||||
// on the kured ds which holds lock info through annotations.
|
||||
type DaemonSetSingleLock struct {
|
||||
GenericLock
|
||||
DaemonSetLock
|
||||
}
|
||||
|
||||
// DaemonSetMultiLock holds all necessary information to do actions
|
||||
// on the kured ds which holds lock info through annotations, valid
|
||||
// for multiple nodes
|
||||
type DaemonSetMultiLock struct {
|
||||
GenericLock
|
||||
DaemonSetLock
|
||||
maxOwners int
|
||||
}
|
||||
|
||||
// LockAnnotationValue contains the lock data,
|
||||
// which allows persistence across reboots, particularily recording if the
|
||||
// node was already unschedulable before kured reboot.
|
||||
// To be modified when using another type of lock storage.
|
||||
type LockAnnotationValue struct {
|
||||
NodeID string `json:"nodeID"`
|
||||
Metadata interface{} `json:"metadata,omitempty"`
|
||||
Metadata NodeMeta `json:"metadata,omitempty"`
|
||||
Created time.Time `json:"created"`
|
||||
TTL time.Duration `json:"TTL"`
|
||||
}
|
||||
|
||||
type multiLockAnnotationValue struct {
|
||||
MaxOwners int `json:"maxOwners"`
|
||||
LockAnnotations []lockAnnotationValue `json:"locks"`
|
||||
LockAnnotations []LockAnnotationValue `json:"locks"`
|
||||
}
|
||||
|
||||
// New creates a daemonsetLock object containing the necessary data for follow up k8s requests
|
||||
func New(client *kubernetes.Clientset, nodeID, namespace, name, annotation string) *DaemonSetLock {
|
||||
return &DaemonSetLock{client, nodeID, namespace, name, annotation}
|
||||
func New(client *kubernetes.Clientset, nodeID, namespace, name, annotation string, TTL time.Duration, concurrency int, lockReleaseDelay time.Duration) Lock {
|
||||
if concurrency > 1 {
|
||||
return &DaemonSetMultiLock{
|
||||
GenericLock: GenericLock{
|
||||
TTL: TTL,
|
||||
releaseDelay: lockReleaseDelay,
|
||||
},
|
||||
DaemonSetLock: DaemonSetLock{
|
||||
client: client,
|
||||
nodeID: nodeID,
|
||||
namespace: namespace,
|
||||
name: name,
|
||||
annotation: annotation,
|
||||
},
|
||||
maxOwners: concurrency,
|
||||
}
|
||||
}
|
||||
return &DaemonSetSingleLock{
|
||||
GenericLock: GenericLock{
|
||||
TTL: TTL,
|
||||
releaseDelay: lockReleaseDelay,
|
||||
},
|
||||
DaemonSetLock: DaemonSetLock{
|
||||
client: client,
|
||||
nodeID: nodeID,
|
||||
namespace: namespace,
|
||||
name: name,
|
||||
annotation: annotation,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetDaemonSet returns the named DaemonSet resource from the DaemonSetLock's configured client
|
||||
func (dsl *DaemonSetLock) GetDaemonSet(sleep, timeout time.Duration) (*v1.DaemonSet, error) {
|
||||
var ds *v1.DaemonSet
|
||||
var lastError error
|
||||
err := wait.PollUntilContextTimeout(context.Background(), sleep, timeout, true, func(ctx context.Context) (bool, error) {
|
||||
if ds, lastError = dsl.client.AppsV1().DaemonSets(dsl.namespace).Get(ctx, dsl.name, metav1.GetOptions{}); lastError != nil {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %v", dsl.name, dsl.namespace, lastError)
|
||||
}
|
||||
return ds, nil
|
||||
}
|
||||
|
||||
// Acquire attempts to annotate the kured daemonset with lock info from instantiated DaemonSetLock using client-go
|
||||
func (dsl *DaemonSetLock) Acquire(metadata interface{}, TTL time.Duration) (bool, string, error) {
|
||||
func (dsl *DaemonSetSingleLock) Acquire(nodeMetadata NodeMeta) (bool, string, error) {
|
||||
for {
|
||||
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
|
||||
if err != nil {
|
||||
return false, "", fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
}
|
||||
|
||||
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
|
||||
valueString, exists := ds.Annotations[dsl.annotation]
|
||||
if exists {
|
||||
value := lockAnnotationValue{}
|
||||
value := LockAnnotationValue{}
|
||||
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
@@ -65,15 +155,22 @@ func (dsl *DaemonSetLock) Acquire(metadata interface{}, TTL time.Duration) (bool
|
||||
}
|
||||
}
|
||||
|
||||
if ds.ObjectMeta.Annotations == nil {
|
||||
ds.ObjectMeta.Annotations = make(map[string]string)
|
||||
if ds.Annotations == nil {
|
||||
ds.Annotations = make(map[string]string)
|
||||
}
|
||||
value := lockAnnotationValue{NodeID: dsl.nodeID, Metadata: metadata, Created: time.Now().UTC(), TTL: TTL}
|
||||
|
||||
value := LockAnnotationValue{
|
||||
NodeID: dsl.nodeID,
|
||||
Metadata: nodeMetadata,
|
||||
Created: time.Now().UTC(),
|
||||
TTL: dsl.TTL,
|
||||
}
|
||||
|
||||
valueBytes, err := json.Marshal(&value)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
ds.ObjectMeta.Annotations[dsl.annotation] = string(valueBytes)
|
||||
ds.Annotations[dsl.annotation] = string(valueBytes)
|
||||
|
||||
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
@@ -81,57 +178,85 @@ func (dsl *DaemonSetLock) Acquire(metadata interface{}, TTL time.Duration) (bool
|
||||
// Something else updated the resource between us reading and writing - try again soon
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
} else {
|
||||
return false, "", err
|
||||
}
|
||||
return false, "", err
|
||||
}
|
||||
|
||||
return true, dsl.nodeID, nil
|
||||
}
|
||||
}
|
||||
|
||||
// AcquireMultiple creates and annotates the daemonset with a multiple owner lock
|
||||
func (dsl *DaemonSetLock) AcquireMultiple(metadata interface{}, TTL time.Duration, maxOwners int) (bool, []string, error) {
|
||||
// Holding checks if the current node still holds the lock based on the DaemonSet annotation.
|
||||
func (dsl *DaemonSetSingleLock) Holding() (bool, LockAnnotationValue, error) {
|
||||
var lockData LockAnnotationValue
|
||||
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
|
||||
if err != nil {
|
||||
return false, lockData, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
}
|
||||
|
||||
valueString, exists := ds.Annotations[dsl.annotation]
|
||||
if exists {
|
||||
value := LockAnnotationValue{}
|
||||
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
|
||||
return false, lockData, err
|
||||
}
|
||||
|
||||
if !ttlExpired(value.Created, value.TTL) {
|
||||
return value.NodeID == dsl.nodeID, value, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, lockData, nil
|
||||
}
|
||||
|
||||
// Release attempts to remove the lock data from the kured ds annotations using client-go
|
||||
func (dsl *DaemonSetSingleLock) Release() error {
|
||||
if dsl.releaseDelay > 0 {
|
||||
log.Infof("Waiting %v before releasing lock", dsl.releaseDelay)
|
||||
time.Sleep(dsl.releaseDelay)
|
||||
}
|
||||
for {
|
||||
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
|
||||
if err != nil {
|
||||
return false, []string{}, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
return fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
}
|
||||
|
||||
annotation := multiLockAnnotationValue{}
|
||||
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
|
||||
valueString, exists := ds.Annotations[dsl.annotation]
|
||||
if exists {
|
||||
if err := json.Unmarshal([]byte(valueString), &annotation); err != nil {
|
||||
return false, []string{}, fmt.Errorf("error getting multi lock: %w", err)
|
||||
value := LockAnnotationValue{}
|
||||
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if value.NodeID != dsl.nodeID {
|
||||
return fmt.Errorf("not lock holder: %v", value.NodeID)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("lock not held")
|
||||
}
|
||||
|
||||
lockPossible, newAnnotation := dsl.canAcquireMultiple(annotation, metadata, TTL, maxOwners)
|
||||
if !lockPossible {
|
||||
return false, nodeIDsFromMultiLock(newAnnotation), nil
|
||||
}
|
||||
delete(ds.Annotations, dsl.annotation)
|
||||
|
||||
if ds.ObjectMeta.Annotations == nil {
|
||||
ds.ObjectMeta.Annotations = make(map[string]string)
|
||||
}
|
||||
newAnnotationBytes, err := json.Marshal(&newAnnotation)
|
||||
if err != nil {
|
||||
return false, []string{}, fmt.Errorf("error marshalling new annotation lock: %w", err)
|
||||
}
|
||||
ds.ObjectMeta.Annotations[dsl.annotation] = string(newAnnotationBytes)
|
||||
|
||||
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.Background(), ds, metav1.UpdateOptions{})
|
||||
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
if se, ok := err.(*errors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
|
||||
// Something else updated the resource between us reading and writing - try again soon
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
} else {
|
||||
return false, []string{}, fmt.Errorf("error updating daemonset with multi lock: %w", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return true, nodeIDsFromMultiLock(newAnnotation), nil
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func ttlExpired(created time.Time, ttl time.Duration) bool {
|
||||
if ttl > 0 && time.Since(created) >= ttl {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func nodeIDsFromMultiLock(annotation multiLockAnnotationValue) []string {
|
||||
nodeIDs := make([]string, 0, len(annotation.LockAnnotations))
|
||||
for _, nodeLock := range annotation.LockAnnotations {
|
||||
@@ -140,7 +265,7 @@ func nodeIDsFromMultiLock(annotation multiLockAnnotationValue) []string {
|
||||
return nodeIDs
|
||||
}
|
||||
|
||||
func (dsl *DaemonSetLock) canAcquireMultiple(annotation multiLockAnnotationValue, metadata interface{}, TTL time.Duration, maxOwners int) (bool, multiLockAnnotationValue) {
|
||||
func (dsl *DaemonSetLock) canAcquireMultiple(annotation multiLockAnnotationValue, metadata NodeMeta, TTL time.Duration, maxOwners int) (bool, multiLockAnnotationValue) {
|
||||
newAnnotation := multiLockAnnotationValue{MaxOwners: maxOwners}
|
||||
freeSpace := false
|
||||
if annotation.LockAnnotations == nil || len(annotation.LockAnnotations) < maxOwners {
|
||||
@@ -162,7 +287,7 @@ func (dsl *DaemonSetLock) canAcquireMultiple(annotation multiLockAnnotationValue
|
||||
if freeSpace {
|
||||
newAnnotation.LockAnnotations = append(
|
||||
newAnnotation.LockAnnotations,
|
||||
lockAnnotationValue{
|
||||
LockAnnotationValue{
|
||||
NodeID: dsl.nodeID,
|
||||
Metadata: metadata,
|
||||
Created: time.Now().UTC(),
|
||||
@@ -175,99 +300,87 @@ func (dsl *DaemonSetLock) canAcquireMultiple(annotation multiLockAnnotationValue
|
||||
return false, multiLockAnnotationValue{}
|
||||
}
|
||||
|
||||
// Test attempts to check the kured daemonset lock status (existence, expiry) from instantiated DaemonSetLock using client-go
|
||||
func (dsl *DaemonSetLock) Test(metadata interface{}) (bool, error) {
|
||||
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
}
|
||||
|
||||
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
|
||||
if exists {
|
||||
value := lockAnnotationValue{Metadata: metadata}
|
||||
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
|
||||
return false, err
|
||||
// Acquire creates and annotates the daemonset with a multiple owner lock
|
||||
func (dsl *DaemonSetMultiLock) Acquire(nodeMetaData NodeMeta) (bool, string, error) {
|
||||
for {
|
||||
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
|
||||
if err != nil {
|
||||
return false, "", fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
}
|
||||
|
||||
if !ttlExpired(value.Created, value.TTL) {
|
||||
return value.NodeID == dsl.nodeID, nil
|
||||
annotation := multiLockAnnotationValue{}
|
||||
valueString, exists := ds.Annotations[dsl.annotation]
|
||||
if exists {
|
||||
if err := json.Unmarshal([]byte(valueString), &annotation); err != nil {
|
||||
return false, "", fmt.Errorf("error getting multi lock: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
lockPossible, newAnnotation := dsl.canAcquireMultiple(annotation, nodeMetaData, dsl.TTL, dsl.maxOwners)
|
||||
if !lockPossible {
|
||||
return false, strings.Join(nodeIDsFromMultiLock(newAnnotation), ","), nil
|
||||
}
|
||||
|
||||
if ds.Annotations == nil {
|
||||
ds.Annotations = make(map[string]string)
|
||||
}
|
||||
newAnnotationBytes, err := json.Marshal(&newAnnotation)
|
||||
if err != nil {
|
||||
return false, "", fmt.Errorf("error marshalling new annotation lock: %w", err)
|
||||
}
|
||||
ds.Annotations[dsl.annotation] = string(newAnnotationBytes)
|
||||
|
||||
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.Background(), ds, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
if se, ok := err.(*errors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
return false, "", fmt.Errorf("error updating daemonset with multi lock: %w", err)
|
||||
|
||||
}
|
||||
return true, strings.Join(nodeIDsFromMultiLock(newAnnotation), ","), nil
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiple attempts to check the kured daemonset lock status for multi locks
|
||||
func (dsl *DaemonSetLock) TestMultiple() (bool, error) {
|
||||
// Holding checks whether the current node is holding a valid lock for the DaemonSetMultiLock.
|
||||
func (dsl *DaemonSetMultiLock) Holding() (bool, LockAnnotationValue, error) {
|
||||
var lockdata LockAnnotationValue
|
||||
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
return false, lockdata, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
}
|
||||
|
||||
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
|
||||
valueString, exists := ds.Annotations[dsl.annotation]
|
||||
if exists {
|
||||
value := multiLockAnnotationValue{}
|
||||
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
|
||||
return false, err
|
||||
return false, lockdata, err
|
||||
}
|
||||
|
||||
for _, nodeLock := range value.LockAnnotations {
|
||||
if nodeLock.NodeID == dsl.nodeID && !ttlExpired(nodeLock.Created, nodeLock.TTL) {
|
||||
return true, nil
|
||||
return true, nodeLock, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
return false, lockdata, nil
|
||||
}
|
||||
|
||||
// Release attempts to remove the lock data from the kured ds annotations using client-go
|
||||
func (dsl *DaemonSetLock) Release() error {
|
||||
// Release attempts to remove the lock data for a single node from the multi node annotation
|
||||
func (dsl *DaemonSetMultiLock) Release() error {
|
||||
if dsl.releaseDelay > 0 {
|
||||
log.Infof("Waiting %v before releasing lock", dsl.releaseDelay)
|
||||
time.Sleep(dsl.releaseDelay)
|
||||
}
|
||||
for {
|
||||
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
}
|
||||
|
||||
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
|
||||
if exists {
|
||||
value := lockAnnotationValue{}
|
||||
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if value.NodeID != dsl.nodeID {
|
||||
return fmt.Errorf("Not lock holder: %v", value.NodeID)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("Lock not held")
|
||||
}
|
||||
|
||||
delete(ds.ObjectMeta.Annotations, dsl.annotation)
|
||||
|
||||
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
if se, ok := err.(*errors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
|
||||
// Something else updated the resource between us reading and writing - try again soon
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ReleaseMultiple attempts to remove the lock data from the kured ds annotations using client-go
|
||||
func (dsl *DaemonSetLock) ReleaseMultiple() error {
|
||||
for {
|
||||
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
}
|
||||
|
||||
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
|
||||
valueString, exists := ds.Annotations[dsl.annotation]
|
||||
modified := false
|
||||
value := multiLockAnnotationValue{}
|
||||
if exists {
|
||||
@@ -292,7 +405,7 @@ func (dsl *DaemonSetLock) ReleaseMultiple() error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("error marshalling new annotation on release: %v", err)
|
||||
}
|
||||
ds.ObjectMeta.Annotations[dsl.annotation] = string(newAnnotationBytes)
|
||||
ds.Annotations[dsl.annotation] = string(newAnnotationBytes)
|
||||
|
||||
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
@@ -300,35 +413,9 @@ func (dsl *DaemonSetLock) ReleaseMultiple() error {
|
||||
// Something else updated the resource between us reading and writing - try again soon
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// GetDaemonSet returns the named DaemonSet resource from the DaemonSetLock's configured client
|
||||
func (dsl *DaemonSetLock) GetDaemonSet(sleep, timeout time.Duration) (*v1.DaemonSet, error) {
|
||||
var ds *v1.DaemonSet
|
||||
var lastError error
|
||||
err := wait.PollImmediate(sleep, timeout, func() (bool, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
if ds, lastError = dsl.client.AppsV1().DaemonSets(dsl.namespace).Get(ctx, dsl.name, metav1.GetOptions{}); lastError != nil {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Timed out trying to get daemonset %s in namespace %s: %v", dsl.name, dsl.namespace, lastError)
|
||||
}
|
||||
return ds, nil
|
||||
}
|
||||
|
||||
func ttlExpired(created time.Time, ttl time.Duration) bool {
|
||||
if ttl > 0 && time.Since(created) >= ttl {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -66,7 +66,7 @@ func TestCanAcquireMultiple(t *testing.T) {
|
||||
current: multiLockAnnotationValue{},
|
||||
desired: multiLockAnnotationValue{
|
||||
MaxOwners: 2,
|
||||
LockAnnotations: []lockAnnotationValue{
|
||||
LockAnnotations: []LockAnnotationValue{
|
||||
{NodeID: node1Name},
|
||||
},
|
||||
},
|
||||
@@ -80,13 +80,13 @@ func TestCanAcquireMultiple(t *testing.T) {
|
||||
maxOwners: 2,
|
||||
current: multiLockAnnotationValue{
|
||||
MaxOwners: 2,
|
||||
LockAnnotations: []lockAnnotationValue{
|
||||
LockAnnotations: []LockAnnotationValue{
|
||||
{NodeID: node2Name},
|
||||
},
|
||||
},
|
||||
desired: multiLockAnnotationValue{
|
||||
MaxOwners: 2,
|
||||
LockAnnotations: []lockAnnotationValue{
|
||||
LockAnnotations: []LockAnnotationValue{
|
||||
{NodeID: node1Name},
|
||||
{NodeID: node2Name},
|
||||
},
|
||||
@@ -101,7 +101,7 @@ func TestCanAcquireMultiple(t *testing.T) {
|
||||
maxOwners: 2,
|
||||
current: multiLockAnnotationValue{
|
||||
MaxOwners: 2,
|
||||
LockAnnotations: []lockAnnotationValue{
|
||||
LockAnnotations: []LockAnnotationValue{
|
||||
{
|
||||
NodeID: node2Name,
|
||||
Created: time.Now().UTC().Add(-1 * time.Minute),
|
||||
@@ -116,7 +116,7 @@ func TestCanAcquireMultiple(t *testing.T) {
|
||||
},
|
||||
desired: multiLockAnnotationValue{
|
||||
MaxOwners: 2,
|
||||
LockAnnotations: []lockAnnotationValue{
|
||||
LockAnnotations: []LockAnnotationValue{
|
||||
{NodeID: node2Name},
|
||||
{NodeID: node3Name},
|
||||
},
|
||||
@@ -131,7 +131,7 @@ func TestCanAcquireMultiple(t *testing.T) {
|
||||
maxOwners: 2,
|
||||
current: multiLockAnnotationValue{
|
||||
MaxOwners: 2,
|
||||
LockAnnotations: []lockAnnotationValue{
|
||||
LockAnnotations: []LockAnnotationValue{
|
||||
{
|
||||
NodeID: node2Name,
|
||||
Created: time.Now().UTC().Add(-1 * time.Hour),
|
||||
@@ -146,7 +146,7 @@ func TestCanAcquireMultiple(t *testing.T) {
|
||||
},
|
||||
desired: multiLockAnnotationValue{
|
||||
MaxOwners: 2,
|
||||
LockAnnotations: []lockAnnotationValue{
|
||||
LockAnnotations: []LockAnnotationValue{
|
||||
{NodeID: node1Name},
|
||||
{NodeID: node3Name},
|
||||
},
|
||||
@@ -161,7 +161,7 @@ func TestCanAcquireMultiple(t *testing.T) {
|
||||
maxOwners: 2,
|
||||
current: multiLockAnnotationValue{
|
||||
MaxOwners: 2,
|
||||
LockAnnotations: []lockAnnotationValue{
|
||||
LockAnnotations: []LockAnnotationValue{
|
||||
{
|
||||
NodeID: node2Name,
|
||||
Created: time.Now().UTC().Add(-1 * time.Hour),
|
||||
@@ -176,17 +176,17 @@ func TestCanAcquireMultiple(t *testing.T) {
|
||||
},
|
||||
desired: multiLockAnnotationValue{
|
||||
MaxOwners: 2,
|
||||
LockAnnotations: []lockAnnotationValue{
|
||||
LockAnnotations: []LockAnnotationValue{
|
||||
{NodeID: node1Name},
|
||||
},
|
||||
},
|
||||
lockPossible: true,
|
||||
},
|
||||
}
|
||||
|
||||
nm := NodeMeta{Unschedulable: false}
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
lockPossible, actual := testCase.daemonSetLock.canAcquireMultiple(testCase.current, struct{}{}, time.Minute, testCase.maxOwners)
|
||||
lockPossible, actual := testCase.daemonSetLock.canAcquireMultiple(testCase.current, nm, time.Minute, testCase.maxOwners)
|
||||
if lockPossible != testCase.lockPossible {
|
||||
t.Fatalf(
|
||||
"unexpected result for lock possible (got %t expected %t new annotation %v",
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
// Package delaytick provides utilities for scheduling periodic events
|
||||
// with an initial randomized delay. It is primarily used to delay the
|
||||
// start of regular ticks, helping to avoid thundering herd problems
|
||||
// when multiple nodes begin operations simultaneously.
|
||||
// You can use that a random ticker in other projects, but there is
|
||||
// no garantee that it will stay (initial plan was to move it to internal)
|
||||
package delaytick
|
||||
|
||||
import (
|
||||
@@ -10,6 +16,7 @@ func New(s rand.Source, d time.Duration) <-chan time.Time {
|
||||
c := make(chan time.Time)
|
||||
|
||||
go func() {
|
||||
// #nosec G404 -- math/rand is used here for non-security timing jitter
|
||||
random := rand.New(s)
|
||||
time.Sleep(time.Duration(float64(d)/2 + float64(d)*random.Float64()))
|
||||
c <- time.Now()
|
||||
|
||||
@@ -1,25 +1,49 @@
|
||||
package reboot
|
||||
|
||||
import (
|
||||
"github.com/kubereboot/kured/pkg/util"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/google/shlex"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// CommandRebootMethod holds context-information for a command reboot.
|
||||
type CommandRebootMethod struct {
|
||||
nodeID string
|
||||
rebootCommand []string
|
||||
// CommandRebooter holds context-information for a reboot with command
|
||||
type CommandRebooter struct {
|
||||
RebootCommand []string
|
||||
}
|
||||
|
||||
// NewCommandReboot creates a new command-rebooter which needs full privileges on the host.
|
||||
func NewCommandReboot(nodeID string, rebootCommand []string) *CommandRebootMethod {
|
||||
return &CommandRebootMethod{nodeID: nodeID, rebootCommand: rebootCommand}
|
||||
}
|
||||
// Reboot triggers the reboot command
|
||||
func (c CommandRebooter) Reboot() error {
|
||||
log.Infof("Invoking command: %s", c.RebootCommand)
|
||||
|
||||
// Reboot triggers the command-reboot.
|
||||
func (c *CommandRebootMethod) Reboot() {
|
||||
log.Infof("Running command: %s for node: %s", c.rebootCommand, c.nodeID)
|
||||
if err := util.NewCommand(c.rebootCommand[0], c.rebootCommand[1:]...).Run(); err != nil {
|
||||
log.Fatalf("Error invoking reboot command: %v", err)
|
||||
bufStdout := new(bytes.Buffer)
|
||||
bufStderr := new(bytes.Buffer)
|
||||
cmd := exec.Command(c.RebootCommand[0], c.RebootCommand[1:]...) // #nosec G204
|
||||
cmd.Stdout = bufStdout
|
||||
cmd.Stderr = bufStderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("error invoking reboot command %s: %v (stdout: %v, stderr: %v)", c.RebootCommand, err, bufStdout.String(), bufStderr.String())
|
||||
}
|
||||
log.Info("Invoked reboot command", "cmd", strings.Join(cmd.Args, " "), "stdout", bufStdout.String(), "stderr", bufStderr.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewCommandRebooter is the constructor to create a CommandRebooter from a string not
|
||||
// yet shell lexed. You can skip this constructor if you parse the data correctly first
|
||||
// when instantiating a CommandRebooter instance.
|
||||
func NewCommandRebooter(rebootCommand string) (*CommandRebooter, error) {
|
||||
if rebootCommand == "" {
|
||||
return nil, fmt.Errorf("no reboot command specified")
|
||||
}
|
||||
cmd := []string{"/usr/bin/nsenter", fmt.Sprintf("-m/proc/%d/ns/mnt", 1), "--"}
|
||||
parsedCommand, err := shlex.Split(rebootCommand)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error %v when parsing reboot command %s", err, rebootCommand)
|
||||
}
|
||||
cmd = append(cmd, parsedCommand...)
|
||||
return &CommandRebooter{RebootCommand: cmd}, nil
|
||||
}
|
||||
|
||||
43
pkg/reboot/command_test.go
Normal file
43
pkg/reboot/command_test.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package reboot
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewCommandRebooter(t *testing.T) {
|
||||
type args struct {
|
||||
rebootCommand string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want *CommandRebooter
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Ensure command is nsenter wrapped",
|
||||
args: args{"ls -Fal"},
|
||||
want: &CommandRebooter{RebootCommand: []string{"/usr/bin/nsenter", "-m/proc/1/ns/mnt", "--", "ls", "-Fal"}},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Ensure empty command is erroring",
|
||||
args: args{""},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := NewCommandRebooter(tt.args.rebootCommand)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("NewCommandRebooter() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("NewCommandRebooter() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,13 @@
|
||||
// Package reboot provides mechanisms to trigger node reboots using different
|
||||
// methods, like custom commands or signals.
|
||||
// Each of those includes constructors and interfaces for handling different reboot
|
||||
// strategies, supporting privileged command execution via nsenter for containerized environments.
|
||||
package reboot
|
||||
|
||||
// Reboot interface defines the Reboot function to be implemented.
|
||||
type Reboot interface {
|
||||
Reboot()
|
||||
// Rebooter is the standard interface to use to execute
|
||||
// the reboot, after it has been considered as necessary.
|
||||
// The Reboot method does not expect any return, yet should
|
||||
// most likely be refactored in the future to return an error
|
||||
type Rebooter interface {
|
||||
Reboot() error
|
||||
}
|
||||
|
||||
@@ -1,34 +1,37 @@
|
||||
package reboot
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SignalRebootMethod holds context-information for a signal reboot.
|
||||
type SignalRebootMethod struct {
|
||||
nodeID string
|
||||
signal int
|
||||
// SignalRebooter holds context-information for a signal reboot.
|
||||
type SignalRebooter struct {
|
||||
Signal int
|
||||
}
|
||||
|
||||
// NewSignalReboot creates a new signal-rebooter which can run unprivileged.
|
||||
func NewSignalReboot(nodeID string, signal int) *SignalRebootMethod {
|
||||
return &SignalRebootMethod{nodeID: nodeID, signal: signal}
|
||||
}
|
||||
|
||||
// Reboot triggers the signal-reboot.
|
||||
func (c *SignalRebootMethod) Reboot() {
|
||||
log.Infof("Emit reboot-signal for node: %s", c.nodeID)
|
||||
|
||||
// Reboot triggers the reboot signal
|
||||
func (c SignalRebooter) Reboot() error {
|
||||
process, err := os.FindProcess(1)
|
||||
if err != nil {
|
||||
log.Fatalf("There was no systemd process found: %v", err)
|
||||
return fmt.Errorf("not running on Unix: %v", err)
|
||||
}
|
||||
|
||||
err = process.Signal(syscall.Signal(c.signal))
|
||||
err = process.Signal(syscall.Signal(c.Signal))
|
||||
// Either PID does not exist, or the signal does not work. Hoping for
|
||||
// a decent enough error.
|
||||
if err != nil {
|
||||
log.Fatalf("Signal of SIGRTMIN+5 failed: %v", err)
|
||||
return fmt.Errorf("signal of SIGRTMIN+5 failed: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewSignalRebooter is the constructor which sets the signal number.
|
||||
// The constructor does not yet validate any input. It should be done in a later commit.
|
||||
func NewSignalRebooter(sig int) (*SignalRebooter, error) {
|
||||
if sig < 1 {
|
||||
return nil, fmt.Errorf("invalid signal: %v", sig)
|
||||
}
|
||||
return &SignalRebooter{Signal: sig}, nil
|
||||
}
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Package taints provides utilities to manage Kubernetes node taints for controlling
|
||||
// pod scheduling and execution. It allows setting, removing, and checking taints on nodes,
|
||||
// using Kubernetes client-go and JSON patching for atomic updates.
|
||||
package taints
|
||||
|
||||
import (
|
||||
|
||||
@@ -50,7 +50,7 @@ func parseWeekdays(days []string) (weekdays, error) {
|
||||
if err != nil {
|
||||
return weekdays(0), err
|
||||
}
|
||||
|
||||
// #nosec G115 -- weekday is guaranteed to be between 0–6 by parseWeekday()
|
||||
result |= 1 << uint32(weekday)
|
||||
}
|
||||
|
||||
@@ -59,6 +59,7 @@ func parseWeekdays(days []string) (weekdays, error) {
|
||||
|
||||
// Contains returns true if the specified weekday is a member of this set.
|
||||
func (w weekdays) Contains(day time.Weekday) bool {
|
||||
// #nosec G115 -- day is time.Weekday [0-6], shift safe within uint32
|
||||
return uint32(w)&(1<<uint32(day)) != 0
|
||||
}
|
||||
|
||||
@@ -81,11 +82,11 @@ func parseWeekday(day string) (time.Weekday, error) {
|
||||
if n >= 0 && n < 7 {
|
||||
return time.Weekday(n), nil
|
||||
}
|
||||
return time.Sunday, fmt.Errorf("Invalid weekday, number out of range: %s", day)
|
||||
return time.Sunday, fmt.Errorf("invalid weekday, number out of range: %s", day)
|
||||
}
|
||||
|
||||
if weekday, ok := dayStrings[strings.ToLower(day)]; ok {
|
||||
return weekday, nil
|
||||
}
|
||||
return time.Sunday, fmt.Errorf("Invalid weekday: %s", day)
|
||||
return time.Sunday, fmt.Errorf("invalid weekday: %s", day)
|
||||
}
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Package timewindow provides utilities for handling days of the week,
|
||||
// including parsing, representing, and manipulating sets of weekdays.
|
||||
// It enables flexible specification of time windows for reboot operations
|
||||
// in kured, supporting various formats and convenience functions.
|
||||
package timewindow
|
||||
|
||||
import (
|
||||
@@ -77,5 +81,5 @@ func parseTime(s string, loc *time.Location) (time.Time, error) {
|
||||
}
|
||||
}
|
||||
|
||||
return time.Now(), fmt.Errorf("Invalid time format: %s", s)
|
||||
return time.Now(), fmt.Errorf("invalid time format: %s", s)
|
||||
}
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NewCommand creates a new Command with stdout/stderr wired to our standard logger
|
||||
func NewCommand(name string, arg ...string) *exec.Cmd {
|
||||
cmd := exec.Command(name, arg...)
|
||||
cmd.Stdout = log.NewEntry(log.StandardLogger()).
|
||||
WithField("cmd", cmd.Args[0]).
|
||||
WithField("std", "out").
|
||||
WriterLevel(log.InfoLevel)
|
||||
|
||||
cmd.Stderr = log.NewEntry(log.StandardLogger()).
|
||||
WithField("cmd", cmd.Args[0]).
|
||||
WithField("std", "err").
|
||||
WriterLevel(log.WarnLevel)
|
||||
|
||||
return cmd
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# USE KUBECTL_CMD to pass context and/or namespaces.
|
||||
KUBECTL_CMD="${KUBECTL_CMD:-kubectl}"
|
||||
SENTINEL_FILE="${SENTINEL_FILE:-/var/run/reboot-required}"
|
||||
|
||||
echo "Creating reboot sentinel on all nodes"
|
||||
|
||||
for nodename in $("$KUBECTL_CMD" get nodes -o name); do
|
||||
docker exec "${nodename/node\//}" hostname
|
||||
docker exec "${nodename/node\//}" touch "${SENTINEL_FILE}"
|
||||
done
|
||||
429
tests/kind/main_test.go
Normal file
429
tests/kind/main_test.go
Normal file
@@ -0,0 +1,429 @@
|
||||
package kind
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
kuredDevImage string = "kured:dev"
|
||||
)
|
||||
|
||||
// KindTest cluster deployed by each TestMain function, prepared to run a given test scenario.
|
||||
type KindTest struct {
|
||||
kindConfigPath string
|
||||
clusterName string
|
||||
timeout time.Duration
|
||||
deployManifests []string
|
||||
localImages []string
|
||||
logsDir string
|
||||
logBuffer bytes.Buffer
|
||||
testInstance *testing.T // Maybe move this to testing.TB
|
||||
}
|
||||
|
||||
func (k *KindTest) Write(p []byte) (n int, err error) {
|
||||
k.testInstance.Helper()
|
||||
k.logBuffer.Write(p)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (k *KindTest) FlushLog() {
|
||||
k.testInstance.Helper()
|
||||
k.testInstance.Log(k.logBuffer.String())
|
||||
k.logBuffer.Reset()
|
||||
}
|
||||
|
||||
func (k *KindTest) RunCmd(cmdDetails ...string) error {
|
||||
cmd := exec.Command(cmdDetails[0], cmdDetails[1:]...)
|
||||
// by making KindTest a Writer, we can simply wire k to logs
|
||||
// writing to k will write to proper logs.
|
||||
cmd.Stdout = k
|
||||
cmd.Stderr = k
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Option that can be passed to the NewKind function in order to change the configuration
|
||||
// of the test cluster
|
||||
type Option func(k *KindTest)
|
||||
|
||||
// Deploy can be passed to NewKind to deploy extra components, in addition to the base deployment.
|
||||
func Deploy(manifest string) Option {
|
||||
return func(k *KindTest) {
|
||||
k.deployManifests = append(k.deployManifests, manifest)
|
||||
}
|
||||
}
|
||||
|
||||
// ExportLogs can be passed to NewKind to specify the folder where the kubernetes logs will be exported after the tests.
|
||||
func ExportLogs(folder string) Option {
|
||||
return func(k *KindTest) {
|
||||
k.logsDir = folder
|
||||
}
|
||||
}
|
||||
|
||||
// Timeout for long-running operations (e.g. deployments, readiness probes...)
|
||||
func Timeout(t time.Duration) Option {
|
||||
return func(k *KindTest) {
|
||||
k.timeout = t
|
||||
}
|
||||
}
|
||||
|
||||
// LocalImage is passed to NewKind to allow loading a local Docker image into the cluster
|
||||
func LocalImage(nameTag string) Option {
|
||||
return func(k *KindTest) {
|
||||
k.localImages = append(k.localImages, nameTag)
|
||||
}
|
||||
}
|
||||
|
||||
// NewKind creates a kind cluster given a name and set of Option instances.
|
||||
func NewKindTester(kindClusterName string, filePath string, t *testing.T, options ...Option) *KindTest {
|
||||
|
||||
k := &KindTest{
|
||||
clusterName: kindClusterName,
|
||||
timeout: 10 * time.Minute,
|
||||
kindConfigPath: filePath,
|
||||
testInstance: t,
|
||||
}
|
||||
for _, option := range options {
|
||||
option(k)
|
||||
}
|
||||
return k
|
||||
}
|
||||
|
||||
// Prepare the kind cluster.
|
||||
func (k *KindTest) Create() error {
|
||||
err := k.RunCmd("kind", "create", "cluster", "--name", k.clusterName, "--config", k.kindConfigPath)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create cluster: %v", err)
|
||||
}
|
||||
|
||||
for _, img := range k.localImages {
|
||||
if err := k.RunCmd("kind", "load", "docker-image", "--name", k.clusterName, img); err != nil {
|
||||
return fmt.Errorf("failed to load image: %v", err)
|
||||
}
|
||||
}
|
||||
for _, mf := range k.deployManifests {
|
||||
kubectlContext := fmt.Sprintf("kind-%v", k.clusterName)
|
||||
if err := k.RunCmd("kubectl", "--context", kubectlContext, "apply", "-f", mf); err != nil {
|
||||
return fmt.Errorf("failed to deploy manifest: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *KindTest) Destroy() error {
|
||||
if k.logsDir != "" {
|
||||
if err := k.RunCmd("kind", "export", "logs", k.logsDir, "--name", k.clusterName); err != nil {
|
||||
return fmt.Errorf("failed to export logs: %v. will not teardown", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := k.RunCmd("kind", "delete", "cluster", "--name", k.clusterName); err != nil {
|
||||
return fmt.Errorf("failed to destroy cluster: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestE2EWithCommand(t *testing.T) {
|
||||
t.Parallel()
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
var kindClusterConfigs = []string{
|
||||
"previous",
|
||||
"current",
|
||||
"next",
|
||||
}
|
||||
// Iterate over each Kubernetes version
|
||||
for _, version := range kindClusterConfigs {
|
||||
version := version
|
||||
// Define a subtest for each combination
|
||||
t.Run(version, func(t *testing.T) {
|
||||
t.Parallel() // Allow tests to run in parallel
|
||||
|
||||
randomInt := strconv.Itoa(rand.Intn(100))
|
||||
kindClusterName := fmt.Sprintf("kured-e2e-command-%v-%v", version, randomInt)
|
||||
kindClusterConfigFile := fmt.Sprintf("../../.github/kind-cluster-%v.yaml", version)
|
||||
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
|
||||
|
||||
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy("testfiles/kured-ds.yaml"))
|
||||
defer k.FlushLog()
|
||||
|
||||
err := k.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating cluster %v", err)
|
||||
}
|
||||
defer func(k *KindTest) {
|
||||
err := k.Destroy()
|
||||
if err != nil {
|
||||
t.Fatalf("Error destroying cluster %v", err)
|
||||
}
|
||||
}(k)
|
||||
|
||||
k.Write([]byte("Now running e2e tests"))
|
||||
|
||||
if err := k.RunCmd("bash", "testfiles/create-reboot-sentinels.sh", kindContext); err != nil {
|
||||
t.Fatalf("failed to create sentinels: %v", err)
|
||||
}
|
||||
|
||||
if err := k.RunCmd("bash", "testfiles/follow-coordinated-reboot.sh", kindContext); err != nil {
|
||||
t.Fatalf("failed to follow reboot: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestE2EWithSignal(t *testing.T) {
|
||||
t.Parallel()
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
var kindClusterConfigs = []string{
|
||||
"previous",
|
||||
"current",
|
||||
"next",
|
||||
}
|
||||
// Iterate over each Kubernetes version
|
||||
for _, version := range kindClusterConfigs {
|
||||
version := version
|
||||
// Define a subtest for each combination
|
||||
t.Run(version, func(t *testing.T) {
|
||||
t.Parallel() // Allow tests to run in parallel
|
||||
|
||||
randomInt := strconv.Itoa(rand.Intn(100))
|
||||
kindClusterName := fmt.Sprintf("kured-e2e-signal-%v-%v", version, randomInt)
|
||||
kindClusterConfigFile := fmt.Sprintf("../../.github/kind-cluster-%v.yaml", version)
|
||||
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
|
||||
|
||||
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy("testfiles/kured-ds-signal.yaml"))
|
||||
defer k.FlushLog()
|
||||
|
||||
err := k.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating cluster %v", err)
|
||||
}
|
||||
defer func(k *KindTest) {
|
||||
err := k.Destroy()
|
||||
if err != nil {
|
||||
t.Fatalf("Error destroying cluster %v", err)
|
||||
}
|
||||
}(k)
|
||||
|
||||
k.Write([]byte("Now running e2e tests"))
|
||||
|
||||
if err := k.RunCmd("bash", "testfiles/create-reboot-sentinels.sh", kindContext); err != nil {
|
||||
t.Fatalf("failed to create sentinels: %v", err)
|
||||
}
|
||||
|
||||
if err := k.RunCmd("bash", "testfiles/follow-coordinated-reboot.sh", kindContext); err != nil {
|
||||
t.Fatalf("failed to follow reboot: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestE2EConcurrentWithCommand(t *testing.T) {
|
||||
t.Parallel()
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
var kindClusterConfigs = []string{
|
||||
"previous",
|
||||
"current",
|
||||
"next",
|
||||
}
|
||||
// Iterate over each Kubernetes version
|
||||
for _, version := range kindClusterConfigs {
|
||||
version := version
|
||||
// Define a subtest for each combination
|
||||
t.Run(version, func(t *testing.T) {
|
||||
t.Parallel() // Allow tests to run in parallel
|
||||
|
||||
randomInt := strconv.Itoa(rand.Intn(100))
|
||||
kindClusterName := fmt.Sprintf("kured-e2e-concurrentcommand-%v-%v", version, randomInt)
|
||||
kindClusterConfigFile := fmt.Sprintf("../../.github/kind-cluster-%v.yaml", version)
|
||||
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
|
||||
|
||||
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy("testfiles/kured-ds-concurrent-command.yaml"))
|
||||
defer k.FlushLog()
|
||||
|
||||
err := k.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating cluster %v", err)
|
||||
}
|
||||
defer func(k *KindTest) {
|
||||
err := k.Destroy()
|
||||
if err != nil {
|
||||
t.Fatalf("Error destroying cluster %v", err)
|
||||
}
|
||||
}(k)
|
||||
|
||||
k.Write([]byte("Now running e2e tests"))
|
||||
|
||||
if err := k.RunCmd("bash", "testfiles/create-reboot-sentinels.sh", kindContext); err != nil {
|
||||
t.Fatalf("failed to create sentinels: %v", err)
|
||||
}
|
||||
|
||||
if err := k.RunCmd("bash", "testfiles/follow-coordinated-reboot.sh", kindContext); err != nil {
|
||||
t.Fatalf("failed to follow reboot: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestE2EConcurrentWithSignal(t *testing.T) {
|
||||
t.Parallel()
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
var kindClusterConfigs = []string{
|
||||
"previous",
|
||||
"current",
|
||||
"next",
|
||||
}
|
||||
// Iterate over each Kubernetes version
|
||||
for _, version := range kindClusterConfigs {
|
||||
version := version
|
||||
// Define a subtest for each combination
|
||||
t.Run(version, func(t *testing.T) {
|
||||
t.Parallel() // Allow tests to run in parallel
|
||||
|
||||
randomInt := strconv.Itoa(rand.Intn(100))
|
||||
kindClusterName := fmt.Sprintf("kured-e2e-concurrentsignal-%v-%v", version, randomInt)
|
||||
kindClusterConfigFile := fmt.Sprintf("../../.github/kind-cluster-%v.yaml", version)
|
||||
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
|
||||
|
||||
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy("testfiles/kured-ds-concurrent-signal.yaml"))
|
||||
defer k.FlushLog()
|
||||
|
||||
err := k.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating cluster %v", err)
|
||||
}
|
||||
defer func(k *KindTest) {
|
||||
err := k.Destroy()
|
||||
if err != nil {
|
||||
t.Fatalf("Error destroying cluster %v", err)
|
||||
}
|
||||
}(k)
|
||||
|
||||
k.Write([]byte("Now running e2e tests"))
|
||||
|
||||
if err := k.RunCmd("bash", "testfiles/create-reboot-sentinels.sh", kindContext); err != nil {
|
||||
t.Fatalf("failed to create sentinels: %v", err)
|
||||
}
|
||||
|
||||
if err := k.RunCmd("bash", "testfiles/follow-coordinated-reboot.sh", kindContext); err != nil {
|
||||
t.Fatalf("failed to follow reboot: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCordonningIsKept(t *testing.T) {
|
||||
t.Parallel()
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
var kindClusterConfigs = []string{
|
||||
"concurrency1",
|
||||
"concurrency2",
|
||||
}
|
||||
// Iterate over each test variant
|
||||
for _, variant := range kindClusterConfigs {
|
||||
variant := variant
|
||||
// Define a subtest for each combination
|
||||
t.Run(variant, func(t *testing.T) {
|
||||
t.Parallel() // Allow tests to run in parallel
|
||||
|
||||
randomInt := strconv.Itoa(rand.Intn(100))
|
||||
kindClusterName := fmt.Sprintf("kured-e2e-cordon-%v-%v", variant, randomInt)
|
||||
kindClusterConfigFile := "../../.github/kind-cluster-next.yaml"
|
||||
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
|
||||
|
||||
var manifest string
|
||||
if variant == "concurrency1" {
|
||||
manifest = "testfiles/kured-ds-signal.yaml"
|
||||
} else {
|
||||
manifest = "testfiles/kured-ds-concurrent-signal.yaml"
|
||||
}
|
||||
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy(manifest))
|
||||
defer k.FlushLog()
|
||||
|
||||
err := k.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating cluster %v", err)
|
||||
}
|
||||
defer func(k *KindTest) {
|
||||
err := k.Destroy()
|
||||
if err != nil {
|
||||
t.Fatalf("Error destroying cluster %v", err)
|
||||
}
|
||||
}(k)
|
||||
|
||||
k.Write([]byte("Now running e2e tests"))
|
||||
|
||||
if err := k.RunCmd("bash", "testfiles/node-stays-as-cordonned.sh", kindContext); err != nil {
|
||||
t.Fatalf("node did not reboot in time: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
func TestE2EBlocker(t *testing.T) {
|
||||
t.Parallel()
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
var kindClusterConfigs = []string{
|
||||
"podblocker",
|
||||
}
|
||||
// Iterate over each variant of the test
|
||||
for _, variant := range kindClusterConfigs {
|
||||
variant := variant
|
||||
// Define a subtest for each combination
|
||||
t.Run(variant, func(t *testing.T) {
|
||||
t.Parallel() // Allow tests to run in parallel
|
||||
|
||||
randomInt := strconv.Itoa(rand.Intn(100))
|
||||
kindClusterName := fmt.Sprintf("kured-e2e-cordon-%v-%v", variant, randomInt)
|
||||
kindClusterConfigFile := "../../.github/kind-cluster-next.yaml"
|
||||
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
|
||||
|
||||
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy(fmt.Sprintf("testfiles/kured-ds-%v.yaml", variant)))
|
||||
defer k.FlushLog()
|
||||
|
||||
err := k.Create()
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating cluster %v", err)
|
||||
}
|
||||
defer func(k *KindTest) {
|
||||
err := k.Destroy()
|
||||
if err != nil {
|
||||
t.Fatalf("Error destroying cluster %v", err)
|
||||
}
|
||||
}(k)
|
||||
|
||||
k.Write([]byte("Now running e2e tests"))
|
||||
|
||||
if err := k.RunCmd("bash", fmt.Sprintf("testfiles/%v.sh", variant), kindContext); err != nil {
|
||||
t.Fatalf("node blocker test did not succeed: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
11
tests/kind/testfiles/create-reboot-sentinels.sh
Executable file
11
tests/kind/testfiles/create-reboot-sentinels.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/usr/bin/env bash
# Create the kured reboot sentinel file on every worker node of a kind
# cluster, by touching it inside each node's docker container.
# $1 (optional): kubectl context to target.

kubectl_flags=( )
[[ "$1" != "" ]] && kubectl_flags=("${kubectl_flags[@]}" --context "$1")

# To speed up the system, let's not kill the control plane.
${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get nodes -o name | grep -v control-plane | while IFS= read -r nodename; do
  echo "Creating reboot sentinel on $nodename"
  # Node names come back as "node/<name>"; strip the prefix to get the
  # docker container name of the kind node.
  docker exec "${nodename/node\//}" hostname
  docker exec "${nodename/node\//}" touch "${SENTINEL_FILE:-/var/run/reboot-required}"
done
|
||||
@@ -1,11 +1,14 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
NODECOUNT=${NODECOUNT:-5}
|
||||
KUBECTL_CMD="${KUBECTL_CMD:-kubectl}"
|
||||
REBOOTCOUNT=${REBOOTCOUNT:-2} # By default we only create two sentinels in create-reboot-sentinels.
|
||||
DEBUG="${DEBUG:-false}"
|
||||
CONTAINER_NAME_FORMAT=${CONTAINER_NAME_FORMAT:-"chart-testing-*"}
|
||||
|
||||
kubectl_flags=( )
|
||||
[[ "$1" != "" ]] && kubectl_flags=("${kubectl_flags[@]}" --context "$1")
|
||||
|
||||
tmp_dir=$(mktemp -d -t kured-XXXX)
|
||||
|
||||
function gather_logs_and_cleanup {
|
||||
if [[ -f "$tmp_dir"/node_output ]]; then
|
||||
rm "$tmp_dir"/node_output
|
||||
@@ -18,15 +21,15 @@ function gather_logs_and_cleanup {
|
||||
# This is useful to see if containers have crashed.
|
||||
echo "docker ps -a:"
|
||||
docker ps -a
|
||||
echo "docker journal logs"
|
||||
journalctl -u docker --no-pager
|
||||
echo "docker journal logs"
|
||||
journalctl -u docker --no-pager
|
||||
|
||||
# This is useful to see if the nodes have _properly_ rebooted.
|
||||
# It should show the reboot/two container starts per node.
|
||||
for name in $(docker ps -a -f "name=${CONTAINER_NAME_FORMAT}" -q); do
|
||||
for id in $(docker ps -a -q); do
|
||||
echo "############################################################"
|
||||
echo "docker logs for container $name:"
|
||||
docker logs "$name"
|
||||
echo "docker logs for container $id:"
|
||||
docker logs "$id"
|
||||
done
|
||||
|
||||
fi
|
||||
@@ -35,30 +38,28 @@ trap gather_logs_and_cleanup EXIT
|
||||
|
||||
declare -A was_unschedulable
|
||||
declare -A has_recovered
|
||||
max_attempts="60"
|
||||
sleep_time=60
|
||||
max_attempts="200"
|
||||
sleep_time=5
|
||||
attempt_num=1
|
||||
|
||||
# Get docker info of each of those kind containers. If one has crashed, restart it.
|
||||
|
||||
set +o errexit
|
||||
echo "There are $NODECOUNT nodes in the cluster"
|
||||
until [ ${#was_unschedulable[@]} == "$NODECOUNT" ] && [ ${#has_recovered[@]} == "$NODECOUNT" ]
|
||||
echo "There are $REBOOTCOUNT nodes total needing reboot in the cluster"
|
||||
until [ ${#was_unschedulable[@]} == "$REBOOTCOUNT" ] && [ ${#has_recovered[@]} == "$REBOOTCOUNT" ]
|
||||
do
|
||||
echo "${#was_unschedulable[@]} nodes were removed from pool once:" "${!was_unschedulable[@]}"
|
||||
echo "${#has_recovered[@]} nodes removed from the pool are now back:" "${!has_recovered[@]}"
|
||||
|
||||
#"$KUBECTL_CMD" logs -n kube-system -l name=kured --ignore-errors > "$tmp_dir"/node_output
|
||||
#if [[ "$DEBUG" == "true" ]]; then
|
||||
# echo "Kured pod logs:"
|
||||
# cat "$tmp_dir"/node_output
|
||||
#fi
|
||||
|
||||
"$KUBECTL_CMD" get nodes -o custom-columns=NAME:.metadata.name,SCHEDULABLE:.spec.unschedulable --no-headers > "$tmp_dir"/node_output
|
||||
${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get nodes -o custom-columns=NAME:.metadata.name,SCHEDULABLE:.spec.unschedulable --no-headers | grep -v control-plane > "$tmp_dir"/node_output
|
||||
if [[ "$DEBUG" == "true" ]]; then
|
||||
# This is useful to see if a node gets stuck after drain, and doesn't
|
||||
# come back up.
|
||||
echo "Result of command $KUBECTL_CMD get nodes ... showing unschedulable nodes:"
|
||||
echo "Result of command kubectl unschedulable nodes:"
|
||||
cat "$tmp_dir"/node_output
|
||||
fi
|
||||
|
||||
while read -r node; do
|
||||
unschedulable=$(echo "$node" | grep true | cut -f 1 -d ' ')
|
||||
if [ -n "$unschedulable" ] && [ -z ${was_unschedulable["$unschedulable"]+x} ] ; then
|
||||
@@ -70,9 +71,15 @@ do
|
||||
echo "$schedulable has recovered!"
|
||||
has_recovered["$schedulable"]=1
|
||||
fi
|
||||
|
||||
# If the container has crashed, restart it.
|
||||
node_name=$(echo "$node" | cut -f 1 -d ' ')
|
||||
stopped_container_id=$(docker container ls --filter=name="$node_name" --filter=status=exited -q)
|
||||
if [ -n "$stopped_container_id" ]; then echo "Node $stopped_container_id needs restart"; docker start "$stopped_container_id"; echo "Container started."; fi
|
||||
|
||||
done < "$tmp_dir"/node_output
|
||||
|
||||
if [[ "${#has_recovered[@]}" == "$NODECOUNT" ]]; then
|
||||
if [[ "${#has_recovered[@]}" == "$REBOOTCOUNT" ]]; then
|
||||
echo "All nodes recovered."
|
||||
break
|
||||
else
|
||||
59
tests/kind/testfiles/node-stays-as-cordonned.sh
Executable file
59
tests/kind/testfiles/node-stays-as-cordonned.sh
Executable file
@@ -0,0 +1,59 @@
|
||||
#!/usr/bin/env bash
# Verify that a node cordonned by hand BEFORE kured reboots it comes back
# cordonned (Ready,SchedulingDisabled), while a node kured cordonned itself
# comes back uncordonned (Ready).
# $1 (optional): kubectl context to target.
# Env: KUBECTL_CMD (default kubectl), SENTINEL_FILE (default /var/run/reboot-required).

kubectl_flags=( )
[[ "$1" != "" ]] && kubectl_flags=("${kubectl_flags[@]}" --context "$1")

# Resolve the sentinel path once; creation and polling below must agree on it.
sentinel="${SENTINEL_FILE:-/var/run/reboot-required}"

# Cordon the designated node before kured gets a chance to act on it.
cordon() {
    ${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" cordon "${precordonned_node}"
}

# Ask kured for a reboot on both workers by touching the sentinel file.
create_sentinel() {
    docker exec "${precordonned_node}" touch "$sentinel"
    docker exec "${notcordonned_node}" touch "$sentinel"
}

# Block until the sentinel disappears from the pre-cordonned node, i.e. the
# node has rebooted. (Previously this hard-coded /var/run/reboot-required and
# ignored a SENTINEL_FILE override — now it polls the same path it created.)
check_reboot_required() {
    while true;
    do
        docker exec "${precordonned_node}" stat "$sentinel" > /dev/null && echo "Reboot still required" || return 0
        sleep 3
    done
}

# Block until the pre-cordonned node is back AND still cordonned.
check_node_back_online_as_cordonned() {
    sleep 5 # For safety, wait for 5 seconds, so that the kubectl command succeeds.
    # This test might be giving us false positive until we work on reliability of the
    # test.
    while true;
    do
        result=$(${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get node "${precordonned_node}" --no-headers | awk '{print $2;}')
        test "${result}" != "Ready,SchedulingDisabled" && echo "Node ${precordonned_node} in state ${result}" || return 0
        sleep 3
    done
}

# Block until the node kured handled by itself is back and schedulable again.
check_node_back_online_as_uncordonned() {
    while true;
    do
        result=$(${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get node "${notcordonned_node}" --no-headers | awk '{print $2;}')
        test "${result}" != "Ready" && echo "Node ${notcordonned_node} in state ${result}" || return 0
        sleep 3
    done
}
### Start main

# Pick two distinct worker nodes: the first is pre-cordonned, the last is not.
worker_nodes=$(${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get nodes -o custom-columns=name:metadata.name --no-headers | grep worker)
precordonned_node=$(echo "$worker_nodes" | head -n 1)
notcordonned_node=$(echo "$worker_nodes" | tail -n 1)

# Wait for kured to install correctly
sleep 15
cordon
create_sentinel
check_reboot_required
echo "Node has rebooted, but may take time to come back ready"
check_node_back_online_as_cordonned
check_node_back_online_as_uncordonned
echo "Showing final node state"
${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get nodes
echo "Test successful"
|
||||
54
tests/kind/testfiles/podblocker.sh
Executable file
54
tests/kind/testfiles/podblocker.sh
Executable file
@@ -0,0 +1,54 @@
|
||||
#!/usr/bin/env bash
# E2E scenario: a pod labelled app=blocker pinned to one worker node should
# prevent kured from rebooting that node; success is kured logging a
# "Reboot ... blocked" message. (Presumably app=blocker matches the
# blocking-pod-selector in the kured-ds-podblocker.yaml manifest — confirm there.)
# $1 (optional): kubectl context to use for every kubectl invocation.
# Env: KUBECTL_CMD (default kubectl), SENTINEL_FILE (default /var/run/reboot-required).

kubectl_flags=( )
[[ "$1" != "" ]] && kubectl_flags=("${kubectl_flags[@]}" --context "$1")

# On any exit (success or failure) dump node-container logs and the kured
# DaemonSet logs to aid debugging CI runs.
function gather_logs_and_cleanup {
	for id in $(docker ps -q); do
		echo "############################################################"
		echo "docker logs for container $id:"
		docker logs "$id"
	done
	# NOTE(review): `kubectl logs --all-pods` requires a recent kubectl — confirm CI version.
	${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" logs ds/kured --all-pods -n kube-system
}
trap gather_logs_and_cleanup EXIT

# Setup phase is best-effort: tolerate transient failures here.
set +o errexit
# First worker node found becomes the blocked host.
worker=$(${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get nodes -o custom-columns=name:metadata.name --no-headers | grep worker | head -n 1)

# Label the node so the nodeSelector below pins the blocker pod onto it.
${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" label nodes "$worker" blocked-host=yes

${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: blocker
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    blocked-host: "yes"
EOF

# Request a reboot of the blocked worker by touching the sentinel file.
docker exec "$worker" touch "${SENTINEL_FILE:-/var/run/reboot-required}"

# From here on, any unexpected command failure should abort the test.
set -o errexit
max_attempts="100"
attempt_num=1
sleep_time=5

# Poll kured's logs until it reports the reboot as blocked, or give up
# after max_attempts polls (~ max_attempts * sleep_time seconds).
until ${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" logs ds/kured --all-pods -n kube-system | grep -i -e "Reboot.*blocked"
do
	if (( attempt_num == max_attempts )); then
		echo "Attempt $attempt_num failed and there are no more attempts left!"
		exit 1
	else
		echo "Did not find 'reboot blocked' in the log, retrying in $sleep_time seconds (Attempt #$attempt_num)"
		sleep "$sleep_time"
	fi
	(( attempt_num++ ))
done
||||
Reference in New Issue
Block a user