Compare commits


2 Commits
main ... 1.91c

Author        SHA1        Message                                     Date
David Shay    e1db60b2b5  Requested changes to multi-arch build       2022-01-14 09:39:48 -05:00
David Shay    f3295b99ef  Added support for multi-arch image build    2022-01-12 10:31:26 -05:00
90 changed files with 4123 additions and 4174 deletions

View File

@@ -1,3 +0,0 @@
exemptions:
- check: analytics
reason: "We don't track people"

7
.github/ct.yaml vendored Normal file
View File

@@ -0,0 +1,7 @@
# See https://github.com/helm/chart-testing#configuration
remote: origin
target-branch: main
chart-dirs:
- charts
chart-repos: []
helm-extra-args: --timeout 600s

View File

@@ -16,6 +16,6 @@ updates:
- dependency-name: "k8s.io/client-go"
- dependency-name: "k8s.io/kubectl"
- package-ecosystem: "docker"
directory: "/"
directory: "cmd/kured"
schedule:
interval: "daily"

13
.github/kind-cluster-1.21.yaml vendored Normal file
View File

@@ -0,0 +1,13 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.21.2
- role: control-plane
image: kindest/node:v1.21.2
- role: control-plane
image: kindest/node:v1.21.2
- role: worker
image: kindest/node:v1.21.2
- role: worker
image: kindest/node:v1.21.2

13
.github/kind-cluster-1.22.yaml vendored Normal file
View File

@@ -0,0 +1,13 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: kindest/node:v1.22.4
- role: control-plane
image: kindest/node:v1.22.4
- role: control-plane
image: kindest/node:v1.22.4
- role: worker
image: kindest/node:v1.22.4
- role: worker
image: kindest/node:v1.22.4

13
.github/kind-cluster-1.23.yaml vendored Normal file
View File

@@ -0,0 +1,13 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: "kindest/node:v1.23.0"
- role: control-plane
image: "kindest/node:v1.23.0"
- role: control-plane
image: "kindest/node:v1.23.0"
- role: worker
image: "kindest/node:v1.23.0"
- role: worker
image: "kindest/node:v1.23.0"

View File

@@ -1,9 +0,0 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: "kindest/node:v1.34.0"
- role: worker
image: "kindest/node:v1.34.0"
- role: worker
image: "kindest/node:v1.34.0"

View File

@@ -1,9 +0,0 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: "kindest/node:v1.35.0"
- role: worker
image: "kindest/node:v1.35.0"
- role: worker
image: "kindest/node:v1.35.0"

View File

@@ -1,9 +0,0 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
image: "kindest/node:v1.33.4"
- role: worker
image: "kindest/node:v1.33.4"
- role: worker
image: "kindest/node:v1.33.4"

View File

@@ -1,83 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
workflow_dispatch:
push:
branches: [ "main" ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "main" ]
schedule:
- cron: '24 13 * * 6'
permissions:
contents: read
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project requires it; please refer to the EXAMPLE below for guidance.
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6
with:
category: "/language:${{matrix.language}}"

View File

@@ -1,28 +0,0 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request,
# surfacing known-vulnerable versions of the packages declared or updated in the PR.
# Once installed, if the workflow run is marked as required,
# PRs introducing known-vulnerable packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
name: 'Dependency Review'
on: [pull_request]
permissions:
contents: read
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- name: 'Checkout Repository'
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: 'Dependency Review'
uses: actions/dependency-review-action@40c09b7dc99638e5ddb0bfd91c1673effc064d8a # v4.8.1

View File

@@ -0,0 +1,19 @@
name: Publish helm chart
on:
push:
branches:
- "main"
paths:
- "charts/**"
jobs:
publish-helm-chart:
name: Publish latest chart
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
charts_dir: charts

View File

@@ -5,85 +5,55 @@ on:
push:
branches:
- main
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
permissions:
contents: read
jobs:
tag-scan-and-push-final-image:
name: "Build, scan, and publish tagged image"
runs-on: ubuntu-latest
permissions:
id-token: write
contents: write
packages: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- uses: actions/checkout@v2
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Find go version
run: |
GO_VERSION=$(awk '/^go/ {print $2};' go.mod)
echo "::set-output name=version::${GO_VERSION}"
id: awk_gomod
- name: Ensure go version
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@v2
with:
go-version-file: 'go.mod'
check-latest: true
go-version: "${{ steps.awk_gomod.outputs.version }}"
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
- name: Login to DockerHub
uses: docker/login-action@v1
with:
version: 2025.10.5
username: ${{ secrets.DOCKERHUB_USERNAME_WEAVEWORKSKUREDCI }}
password: ${{ secrets.DOCKERHUB_TOKEN_WEAVEWORKSKUREDCI }}
- name: Login to ghcr.io
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@v1
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
registry: ghcr.io
username: weave-ghcr-bot
password: ${{ secrets.KURED_WEAVE_GHCR_BOT_TOKEN }}
- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
id: buildx
uses: docker/setup-buildx-action@v1
- name: Find current tag version
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
run: echo "::set-output name=sha_short::$(git rev-parse --short HEAD)"
id: tags
- name: Build binaries
run: make kured-release-snapshot
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build image
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
uses: docker/build-push-action@v2
with:
context: .
platforms: linux/arm64, linux/amd64, linux/arm/v7, linux/arm/v6, linux/386
file: cmd/kured/Dockerfile.multi
platforms: linux/arm64, linux/amd64
push: true
labels: ${{ steps.meta.outputs.labels }}
tags: |
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
- name: Generate SBOM
run: |
syft ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }} -o spdx > kured.sbom
- name: Sign and attest artifacts
run: |
cosign sign -y -r ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
cosign sign-blob -y --output-signature kured.sbom.sig --output-certificate kured.sbom.pem kured.sbom
cosign attest -y --type spdx --predicate kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
cosign attach sbom --type spdx --sbom kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
docker.io/${{ GITHUB.REPOSITORY }}:main-${{ steps.tags.outputs.sha_short }}
ghcr.io/${{ GITHUB.REPOSITORY }}:main-${{ steps.tags.outputs.sha_short }}

78
.github/workflows/on-pr-charts.yaml vendored Normal file
View File

@@ -0,0 +1,78 @@
# This is just extra testing: lint checks and a basic installation.
# These can fail earlier than the functional tests (they are shorter)
# and give developers feedback quickly if they didn't test themselves.
name: PR - charts
on:
pull_request:
paths:
- "charts/**"
jobs:
# We create two jobs (with a matrix) instead of one so they run in parallel.
# We don't need to conditionally check whether something has changed: GitHub Actions
# tackles that for us.
# Fail-fast ensures that if one of the matrix jobs fails, the other one gets cancelled.
test-chart:
name: Test helm chart changes
runs-on: ubuntu-latest
strategy:
fail-fast: true
matrix:
test-action:
- lint
- install
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: "0"
- uses: actions/setup-python@v2
with:
python-version: 3.7
# Helm is already present in github actions, so do not re-install it
- name: Setup chart testing
uses: helm/chart-testing-action@v2.2.0
- name: Create default kind cluster
uses: helm/kind-action@v1.2.0
with:
version: v0.11.0
if: ${{ matrix.test-action == 'install' }}
- name: Run chart tests
run: ct ${{ matrix.test-action }} --config .github/ct.yaml
# This doesn't re-use the ct actions, due to many limitations (auto tear down, no real testing)
deploy-chart:
name: Functional test of the helm chart in its current state (needs a published image for the helm chart)
runs-on: ubuntu-latest
needs: test-chart
steps:
- uses: actions/checkout@v2
# Default name for helm/kind-action kind clusters is "chart-testing"
- name: Create 1 node kind cluster
uses: helm/kind-action@v1.2.0
with:
version: v0.11.0
- name: Deploy kured on default namespace with its helm chart
run: |
# Documented in official helm doc to live on the edge
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# Refresh bins
hash -r
helm install kured ./charts/kured/ --set configuration.period=1m --wait
kubectl config set-context kind-chart-testing
kubectl get ds --all-namespaces
kubectl describe ds kured
- name: Test if successful deploy
uses: nick-invision/retry@v2.6.0
with:
timeout_minutes: 10
max_attempts: 10
retry_wait_seconds: 10
# DESIRED CURRENT READY UP-TO-DATE AVAILABLE should all be = to cluster_size
command: "kubectl get ds kured | grep -E 'kured.*1.*1.*1.*1.*1'"

View File

@@ -1,26 +0,0 @@
name: Verify Docs Links
on:
pull_request:
push:
paths:
- '**.md'
jobs:
pr-check-docs-links:
name: Check docs for incorrect links
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Link Checker
uses: lycheeverse/lychee-action@a8c4c7cb88f0c7386610c35eb25108e448569cb0 # v2.7.0
env:
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
with:
args: --verbose --no-progress '*.md' '*.yaml' '*/*/*.go' --exclude-link-local
fail: true

View File

@@ -4,83 +4,94 @@ on:
push:
jobs:
pr-short-tests:
name: Run short go tests
runs-on: ubuntu-latest
pr-gotest:
name: Run go tests
runs-on: ubuntu-18.04
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- name: checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
uses: actions/checkout@v2
- name: Find go version
run: |
GO_VERSION=$(awk '/^go/ {print $2};' go.mod)
echo "::set-output name=version::${GO_VERSION}"
id: awk_gomod
- name: Ensure go version
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@v2
with:
go-version-file: 'go.mod'
check-latest: true
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
with:
version: 2025.10.5
go-version: "${{ steps.awk_gomod.outputs.version }}"
- name: run tests
run: make test
run: go test -json ./... > test.json
- name: Annotate tests
if: always()
uses: guyarb/golang-test-annotations@2941118d7ef622b1b3771d1ff6eae9e90659eb26 # v0.8.0
uses: guyarb/golang-test-annoations@v0.5.0
with:
test-results: test.json
pr-shellcheck:
name: Lint bash code with shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Run ShellCheck
uses: bewuethr/shellcheck-action@v2
pr-lint-code:
name: Lint golang code
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Find go version
run: |
GO_VERSION=$(awk '/^go/ {print $2};' go.mod)
echo "::set-output name=version::${GO_VERSION}"
id: awk_gomod
- name: Ensure go version
uses: actions/setup-go@v2
with:
go-version: "${{ steps.awk_gomod.outputs.version }}"
- name: Lint cmd folder
uses: Jerome1337/golint-action@v1.0.2
with:
golint-path: './cmd/...'
- name: Lint pkg folder
uses: Jerome1337/golint-action@v1.0.2
with:
golint-path: './pkg/...'
pr-check-docs-links:
name: Check docs for incorrect links
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Link Checker
id: lc
uses: peter-evans/link-checker@v1
with:
args: -r *.md *.yaml */*/*.go -x .cluster.local
- name: Fail if there were link errors
run: exit ${{ steps.lc.outputs.exit_code }}
# This should not be made a mandatory test
# It is only used to make us aware of any potential security failure that
# It is only used to make us aware of any potential security failure, that
# should trigger a bump of the image in build/.
pr-vuln-scan:
name: Build image and scan it against known vulnerabilities
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@v2
- name: Find go version
run: |
GO_VERSION=$(awk '/^go/ {print $2};' go.mod)
echo "::set-output name=version::${GO_VERSION}"
id: awk_gomod
- name: Ensure go version
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@v2
with:
go-version-file: 'go.mod'
check-latest: true
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
go-version: "${{ steps.awk_gomod.outputs.version }}"
- run: make DH_ORG="${{ github.repository_owner }}" VERSION="${{ github.sha }}" image
- uses: Azure/container-scan@v0
with:
version: 2025.10.5
- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Find current tag version
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
id: tags
- name: Build image
run: VERSION="${{ steps.tags.outputs.sha_short }}" DH_ORG="${{ github.repository_owner }}" make image
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
with:
image-ref: 'ghcr.io/${{ github.repository }}:${{ steps.tags.outputs.sha_short }}'
format: 'table'
exit-code: '1'
ignore-unfixed: true
vuln-type: 'os,library'
severity: 'CRITICAL,HIGH'
image-name: docker.io/${{ github.repository_owner }}/kured:${{ github.sha }}
# This ensures the latest code works with the manifests built from tree.
# It is useful for two things:
@@ -88,91 +99,238 @@ jobs:
# - Ensure manifests work with the latest versions even with no manifest change
# (compared to helm charts, manifests cannot easily template changes based on versions)
# Helm charts are _trailing_ releases, while manifests are done during development.
# This test uses the "command" reboot-method.
e2e-manifests:
name: End-to-End test with kured with code and manifests from HEAD
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
testname:
- "TestE2EWithCommand"
- "TestE2EWithSignal"
- "TestE2EConcurrentWithCommand"
- "TestE2EConcurrentWithSignal"
kubernetes_version:
- "previous"
- "current"
- "next"
kubernetes:
- "1.21"
- "1.22"
- "1.23"
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@v2
- name: Find go version
run: |
GO_VERSION=$(awk '/^go/ {print $2};' go.mod)
echo "::set-output name=version::${GO_VERSION}"
id: awk_gomod
- name: Ensure go version
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@v2
with:
go-version-file: 'go.mod'
check-latest: true
go-version: "${{ steps.awk_gomod.outputs.version }}"
- name: Build artifacts
run: |
make DH_ORG="${{ github.repository_owner }}" VERSION="${{ github.sha }}" image
make DH_ORG="${{ github.repository_owner }}" VERSION="${{ github.sha }}" manifest
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
- name: Workaround "Failed to attach 1 to compat systemd cgroup /actions_job/..." on gh actions
run: |
sudo bash << EOF
cp /etc/docker/daemon.json /etc/docker/daemon.json.old
echo '{}' > /etc/docker/daemon.json
systemctl restart docker || journalctl --no-pager -n 500
systemctl status docker
EOF
# Default name for helm/kind-action kind clusters is "chart-testing"
- name: Create kind cluster with 5 nodes
uses: helm/kind-action@v1.2.0
with:
version: 2025.10.5
config: .github/kind-cluster-${{ matrix.kubernetes }}.yaml
version: v0.11.0
- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
- name: Preload previously built images onto kind cluster
run: kind load docker-image docker.io/${{ github.repository_owner }}/kured:${{ github.sha }} --name chart-testing
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Do not wait for an hour before detecting the rebootSentinel
run: |
sed -i 's/#\(.*\)--period=1h/\1--period=30s/g' kured-ds.yaml
- name: Find current tag version
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
id: tags
- name: Install kured with kubectl
run: |
kubectl apply -f kured-rbac.yaml && kubectl apply -f kured-ds.yaml
- name: Run specific e2e tests
run: make e2e-test ARGS="-run ^${{ matrix.testname }}/${{ matrix.kubernetes_version }}"
- name: Ensure kured is ready
uses: nick-invision/retry@v2.6.0
with:
timeout_minutes: 10
max_attempts: 10
retry_wait_seconds: 60
# DESIRED CURRENT READY UP-TO-DATE AVAILABLE should all be = to cluster_size
command: "kubectl get ds -n kube-system kured | grep -E 'kured.*5.*5.*5.*5.*5'"
- name: Create reboot sentinel files
run: |
./tests/kind/create-reboot-sentinels.sh
e2e-tests-singleversion:
name: End-to-End test targeting a single version of kubernetes
- name: Follow reboot until success
env:
DEBUG: true
run: |
./tests/kind/follow-coordinated-reboot.sh
scenario-prom-helm:
name: Test prometheus with latest code from HEAD (=overrides image of the helm chart)
runs-on: ubuntu-latest
# only build with the oldest and newest supported versions; it should be good enough.
strategy:
fail-fast: false
matrix:
testname:
- "TestCordonningIsKept/concurrency1"
- "TestCordonningIsKept/concurrency2"
- "TestE2EBlocker/podblocker"
kubernetes:
- "1.21"
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@v2
- name: Find go version
run: |
GO_VERSION=$(awk '/^go/ {print $2};' go.mod)
echo "::set-output name=version::${GO_VERSION}"
id: awk_gomod
- name: Ensure go version
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@v2
with:
go-version-file: 'go.mod'
check-latest: true
go-version: "${{ steps.awk_gomod.outputs.version }}"
- name: Build artifacts
run: |
make DH_ORG="${{ github.repository_owner }}" VERSION="${{ github.sha }}" image
make DH_ORG="${{ github.repository_owner }}" VERSION="${{ github.sha }}" helm-chart
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
- name: Workaround 'Failed to attach 1 to compat systemd cgroup /actions_job/...' on gh actions
run: |
sudo bash << EOF
cp /etc/docker/daemon.json /etc/docker/daemon.json.old
echo '{}' > /etc/docker/daemon.json
systemctl restart docker || journalctl --no-pager -n 500
systemctl status docker
EOF
# Default name for helm/kind-action kind clusters is "chart-testing"
- name: Create 1 node kind cluster
uses: helm/kind-action@v1.2.0
with:
version: 2025.10.5
version: v0.11.0
- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
- name: Preload previously built images onto kind cluster
run: kind load docker-image docker.io/${{ github.repository_owner }}/kured:${{ github.sha }} --name chart-testing
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Deploy kured on default namespace with its helm chart
run: |
# Documented in official helm doc to live on the edge
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# Refresh bins
hash -r
helm install kured ./charts/kured/ --wait --values ./charts/kured/ci/prometheus-values.yaml
kubectl config set-context kind-chart-testing
kubectl get ds --all-namespaces
kubectl describe ds kured
- name: Find current tag version
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
id: tags
- name: Ensure kured is ready
uses: nick-invision/retry@v2.6.0
with:
timeout_minutes: 10
max_attempts: 10
retry_wait_seconds: 60
# DESIRED CURRENT READY UP-TO-DATE AVAILABLE
command: "kubectl get ds kured | grep -E 'kured.*1.*1.*1.*1.*1' "
- name: Run specific e2e tests
run: make e2e-test ARGS="-run ^${{ matrix.testname }}"
- name: Get metrics (healthy)
uses: nick-invision/retry@v2.6.0
with:
timeout_minutes: 2
max_attempts: 12
retry_wait_seconds: 5
command: "./tests/kind/test-metrics.sh 0"
- name: Create reboot sentinel files
run: |
./tests/kind/create-reboot-sentinels.sh
- name: Get metrics (need reboot)
uses: nick-invision/retry@v2.6.0
with:
timeout_minutes: 15
max_attempts: 10
retry_wait_seconds: 60
command: "./tests/kind/test-metrics.sh 1"
# TEMPLATE Scenario testing.
# Note: keep in mind that the helm chart's appVersion is overridden to test the HEAD of your branch,
# if you `make helm-chart`.
# This will allow you to test properly your scenario and not use an existing image which will not
# contain your feature.
# scenario-<REPLACETHIS>-helm:
# #example: Testing <REPLACETHIS> with helm chart and code from HEAD"
# name: "<REPLACETHIS>"
# runs-on: ubuntu-latest
# strategy:
# fail-fast: false
# # You can define your own kubernetes versions. For example if your helm chart change should behave differently with different kubernetes versions.
# matrix:
# kubernetes:
# - "1.20"
# steps:
# - uses: actions/checkout@v2
# - name: Find go version
# run: |
# GO_VERSION=$(awk '/^go/ {print $2};' go.mod)
# echo "::set-output name=version::${GO_VERSION}"
# id: awk_gomod
# - name: Ensure go version
# uses: actions/setup-go@v2
# with:
# go-version: "${{ steps.awk_gomod.outputs.version }}"
# - name: Build artifacts
# run: |
# make DH_ORG="${{ github.repository_owner }}" VERSION="${{ github.sha }}" image
# make DH_ORG="${{ github.repository_owner }}" VERSION="${{ github.sha }}" helm-chart
#
# - name: "Workaround 'Failed to attach 1 to compat systemd cgroup /actions_job/...' on gh actions"
# run: |
# sudo bash << EOF
# cp /etc/docker/daemon.json /etc/docker/daemon.json.old
# echo '{}' > /etc/docker/daemon.json
# systemctl restart docker || journalctl --no-pager -n 500
# systemctl status docker
# EOF
#
# # Default name for helm/kind-action kind clusters is "chart-testing"
# - name: Create 5 node kind cluster
# uses: helm/kind-action@master
# with:
# config: .github/kind-cluster-${{ matrix.kubernetes }}.yaml
#
# - name: Preload previously built images onto kind cluster
# run: kind load docker-image docker.io/${{ github.repository_owner }}/kured:${{ github.sha }} --name chart-testing
#
# - name: Deploy kured on default namespace with its helm chart
# run: |
# # Documented in official helm doc to live on the edge
# curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# # Refresh bins
# hash -r
# helm install kured ./charts/kured/ --wait --values ./charts/kured/ci/<REPLACETHIS>-values.yaml
# kubectl config set-context kind-chart-testing
# kubectl get ds --all-namespaces
# kubectl describe ds kured
#
# - name: Ensure kured is ready
# uses: nick-invision/retry@v2.6.0
# with:
# timeout_minutes: 10
# max_attempts: 10
# retry_wait_seconds: 60
# # DESIRED CURRENT READY UP-TO-DATE AVAILABLE should all be = 5
# command: "kubectl get ds kured | grep -E 'kured.*5.*5.*5.*5.*5' "
#
# - name: Create reboot sentinel files
# run: |
# ./tests/kind/create-reboot-sentinels.sh
#
# - name: Test <REPLACETHIS>
# env:
# DEBUG: true
# run: |
# <TODO>

View File

@@ -7,104 +7,57 @@ on:
push:
tags:
- "*"
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
permissions:
contents: read
jobs:
tag-scan-and-push-final-image:
name: "Build, scan, and publish tagged image"
runs-on: ubuntu-latest
permissions:
id-token: write
contents: write
packages: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@v2
- name: Find go version
run: |
GO_VERSION=$(awk '/^go/ {print $2};' go.mod)
echo "::set-output name=version::${GO_VERSION}"
id: awk_gomod
- name: Ensure go version
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
uses: actions/setup-go@v2
with:
go-version-file: 'go.mod'
check-latest: true
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
with:
version: 2025.10.5
go-version: "${{ steps.awk_gomod.outputs.version }}"
- name: Find current tag version
run: echo "version=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
run: echo "::set-output name=version::${GITHUB_REF#refs/tags/}"
id: tags
- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Build binaries
run: make kured-release-tag
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build single image for scan
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
- run: |
make DH_ORG="${{ github.repository_owner }}" VERSION="${{ steps.tags.outputs.version }}" image
- uses: Azure/container-scan@v0
with:
context: .
push: false
load: true
tags: |
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
image-name: docker.io/${{ github.repository_owner }}/kured:${{ steps.tags.outputs.version }}
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
- name: Login to DockerHub
uses: docker/login-action@v1
with:
image-ref: '${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}'
format: 'table'
exit-code: '1'
ignore-unfixed: true
vuln-type: 'os,library'
severity: 'CRITICAL,HIGH'
username: ${{ secrets.DOCKERHUB_USERNAME_WEAVEWORKSKUREDCI }}
password: ${{ secrets.DOCKERHUB_TOKEN_WEAVEWORKSKUREDCI }}
- name: Login to ghcr.io
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@v1
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
username: weave-ghcr-bot
password: ${{ secrets.KURED_WEAVE_GHCR_BOT_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Build release images
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
- name: Build image
uses: docker/build-push-action@v2
with:
context: .
file: cmd/kured/Dockerfile.multi
platforms: linux/arm64, linux/amd64, linux/arm/v7, linux/arm/v6, linux/386
push: true
labels: ${{ steps.meta.outputs.labels }}
tags: |
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
- name: Generate SBOM
run: |
syft ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }} -o spdx > kured.sbom
- name: Sign and attest artifacts
run: |
cosign sign -y -r ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
cosign sign-blob -y --output-signature kured.sbom.sig kured.sbom
cosign attest -y --type spdx --predicate kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
cosign attach sbom --type spdx --sbom kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
docker.io/${{ GITHUB.REPOSITORY }}:${{ steps.tags.outputs.version }}
ghcr.io/${{ GITHUB.REPOSITORY }}:${{ steps.tags.outputs.version }}

136
.github/workflows/periodics-daily.yaml vendored Normal file
View File

@@ -0,0 +1,136 @@
name: Daily jobs
on:
schedule:
- cron: "30 1 * * *"
jobs:
periodics-gotest:
name: Run go tests
runs-on: ubuntu-18.04
steps:
- name: checkout
uses: actions/checkout@v2
- name: run tests
run: go test -json ./... > test.json
- name: Annotate tests
if: always()
uses: guyarb/golang-test-annotations@v0.5.0
with:
test-results: test.json
periodics-mark-stale:
name: Mark stale issues and PRs
runs-on: ubuntu-latest
steps:
# Stale by default waits for 60 days before marking PR/issues as stale, and closes them after 21 days.
# Do not expire the first issues that would allow the community to grow.
- uses: actions/stale@v4
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'This issue was automatically considered stale due to lack of activity. Please update it and/or join our slack channels to promote it, before it automatically closes (in 7 days).'
stale-pr-message: 'This PR was automatically considered stale due to lack of activity. Please refresh it and/or join our slack channels to highlight it, before it automatically closes (in 7 days).'
stale-issue-label: 'no-issue-activity'
stale-pr-label: 'no-pr-activity'
exempt-issue-labels: 'good first issue,keep'
days-before-close: 21
check-docs-links:
name: Check docs for incorrect links
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Link Checker
id: lc
uses: peter-evans/link-checker@v1
with:
args: -r *.md *.yaml */*/*.go -x .cluster.local
- name: Fail if there were link errors
run: exit ${{ steps.lc.outputs.exit_code }}
vuln-scan:
name: Build image and scan it against known vulnerabilities
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Find go version
run: |
GO_VERSION=$(awk '/^go/ {print $2};' go.mod)
echo "::set-output name=version::${GO_VERSION}"
id: awk_gomod
- name: Ensure go version
uses: actions/setup-go@v2
with:
go-version: "${{ steps.awk_gomod.outputs.version }}"
- run: make DH_ORG="${{ github.repository_owner }}" VERSION="${{ github.sha }}" image
- uses: Azure/container-scan@v0
with:
image-name: docker.io/${{ github.repository_owner }}/kured:${{ github.sha }}
deploy-helm:
name: Ensure our currently released helm chart works on all kubernetes versions
runs-on: ubuntu-latest
# only build with the oldest and newest supported versions; it should be good enough.
strategy:
matrix:
kubernetes:
- "1.21"
- "1.22"
- "1.23"
steps:
- uses: actions/checkout@v2
- name: Find go version
run: |
GO_VERSION=$(awk '/^go/ {print $2};' go.mod)
echo "::set-output name=version::${GO_VERSION}"
id: awk_gomod
- name: Ensure go version
uses: actions/setup-go@v2
with:
go-version: "${{ steps.awk_gomod.outputs.version }}"
- name: "Workaround 'Failed to attach 1 to compat systemd cgroup /actions_job/...' on gh actions"
run: |
sudo bash << EOF
cp /etc/docker/daemon.json /etc/docker/daemon.json.old
echo '{}' > /etc/docker/daemon.json
systemctl restart docker || journalctl --no-pager -n 500
systemctl status docker
EOF
# Default name for helm/kind-action kind clusters is "chart-testing"
- name: Create 5 node kind cluster
uses: helm/kind-action@v1.2.0
with:
config: .github/kind-cluster-${{ matrix.kubernetes }}.yaml
version: v0.11.0
- name: Deploy kured on default namespace with its helm chart
run: |
# Documented in official helm doc to live on the edge
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# Refresh bins
hash -r
helm install kured ./charts/kured/ --set configuration.period=1m
kubectl config set-context kind-chart-testing
kubectl get ds --all-namespaces
kubectl describe ds kured
- name: Ensure kured is ready
uses: nick-invision/retry@v2.6.0
with:
timeout_minutes: 10
max_attempts: 10
retry_wait_seconds: 60
# DESIRED CURRENT READY UP-TO-DATE AVAILABLE should all be = 5
command: "kubectl get ds kured | grep -E 'kured.*5.*5.*5.*5.*5' "
- name: Create reboot sentinel files
run: |
./tests/kind/create-reboot-sentinels.sh
- name: Follow reboot until success
env:
DEBUG: true
run: |
./tests/kind/follow-coordinated-reboot.sh

View File

@@ -1,116 +0,0 @@
name: Daily jobs
on:
schedule:
- cron: "30 1 * * 6"
jobs:
periodics-gotest:
name: Run go tests
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- name: checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
with:
version: 2025.10.5
- name: run tests
run: make test
- name: Annotate tests
if: always()
uses: guyarb/golang-test-annotations@2941118d7ef622b1b3771d1ff6eae9e90659eb26 # v0.8.0
with:
test-results: test.json
periodics-mark-stale:
name: Mark stale issues and PRs
runs-on: ubuntu-latest
steps:
# Stale, by default, waits for 60 days before marking PR/issues as stale and closes them after 21 days.
# Do not expire the first issues that would allow the community to grow.
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'This issue was automatically considered stale due to lack of activity. Please update it and/or join our slack channels to promote it, before it automatically closes (in 7 days).'
stale-pr-message: 'This PR was automatically considered stale due to lack of activity. Please refresh it and/or join our slack channels to highlight it, before it automatically closes (in 7 days).'
stale-issue-label: 'no-issue-activity'
stale-pr-label: 'no-pr-activity'
exempt-issue-labels: 'good first issue,keep'
exempt-pr-labels: 'keep'
days-before-close: 21
check-docs-links:
name: Check docs for incorrect links
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Link Checker
uses: lycheeverse/lychee-action@a8c4c7cb88f0c7386610c35eb25108e448569cb0
env:
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
with:
args: --verbose --no-progress '*.md' '*.yaml' '*/*/*.go' --exclude-link-local
fail: true
vuln-scan:
name: Build image and scan it against known vulnerabilities
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Ensure go version
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version-file: 'go.mod'
check-latest: true
- uses: jdx/mise-action@146a28175021df8ca24f8ee1828cc2a60f980bd5 # v3.5.1
with:
version: 2025.10.5
- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
- name: Find current tag version
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
id: tags
- name: Build artifacts
run: VERSION="${{ steps.tags.outputs.sha_short }}" DH_ORG="${{ github.repository_owner }}" make image
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
with:
image-ref: 'ghcr.io/${{ github.repository }}:${{ steps.tags.outputs.sha_short }}'
format: 'table'
exit-code: '1'
ignore-unfixed: true
vuln-type: 'os,library'
severity: 'CRITICAL,HIGH'

View File

@@ -1,78 +0,0 @@
# This workflow uses actions that are not certified by GitHub. They are provided
# by a third-party and are governed by separate terms of service, privacy
# policy, and support documentation.
name: Scorecard supply-chain security
on:
# For Branch-Protection check. Only the default branch is supported. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
branch_protection_rule:
# To guarantee Maintained check is occasionally updated. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
schedule:
- cron: '34 3 * * 6'
push:
branches: [ "main" ]
# Declare default permissions as read only.
permissions: read-all
jobs:
analysis:
name: Scorecard analysis
runs-on: ubuntu-latest
permissions:
# Needed to upload the results to code-scanning dashboard.
security-events: write
# Needed to publish results and get a badge (see publish_results below).
id-token: write
# Uncomment the permissions below if installing in a private repository.
# contents: read
# actions: read
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- name: "Checkout code"
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
with:
results_file: results.sarif
results_format: sarif
# (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
# - you want to enable the Branch-Protection check on a *public* repository, or
# - you are installing Scorecard on a *private* repository
# To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
# repo_token: ${{ secrets.SCORECARD_TOKEN }}
# Public repositories:
# - Publish results to OpenSSF REST API for easy access by consumers
# - Allows the repository to include the Scorecard badge.
# - See https://github.com/ossf/scorecard-action#publishing-results.
# For private repositories:
# - `publish_results` will always be set to `false`, regardless
# of the value entered here.
publish_results: true
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v3.pre.node20
with:
name: SARIF file
path: results.sarif
retention-days: 5
# Upload the results to GitHub's code scanning dashboard (optional).
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6
with:
sarif_file: results.sarif

3
.gitignore vendored
View File

@@ -1,6 +1,3 @@
cmd/kured/kured
vendor
build
dist
test.json
tests/kind/testfiles/*.yaml

View File

@@ -1,37 +0,0 @@
version: "2"
# timeout: 5m (we can add this if needed)
modules-download-mode: readonly
run:
tests: false
linters:
enable:
- govet
- staticcheck
- unused
- contextcheck
- goconst
- gosec
- testifylint
- errcheck
- revive
linters-settings:
errcheck:
check-type-assertions: true
check-blank: true
revive:
severity: warning
confidence: 0.8
rules:
- name: indent-error-flow
- name: var-naming
- name: import-shadowing
# https://github.com/mgechev/revive/blob/HEAD/RULES_DESCRIPTIONS.md#package-comments
- name: package-comments # This is not working!
disabled: true
output:
format: colored-line-number
print-issued-lines: true
print-linter-name: true
uniq-by-line: false
sort-results: true

View File

@@ -1,32 +0,0 @@
project_name: kured
before:
hooks:
- go mod tidy
builds:
- main: ./cmd/kured
env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- amd64
- arm64
- arm
- "386"
goarm:
- "6"
- "7"
ldflags:
- -s -w -X main.version={{ if .IsSnapshot }}{{ .ShortCommit }}{{ else }}{{ .Version }}{{ end }}
mod_timestamp: "{{ .CommitTimestamp }}"
flags:
- -trimpath
snapshot:
name_template: "{{ .ShortCommit }}"
release:
disable: true
changelog:
skip: true

View File

@@ -1,6 +0,0 @@
app.fossa.com
cluster.local
hooks.slack.com
localhost
slack://
teams://

View File

@@ -1,8 +0,0 @@
[tools]
cosign = "2.2.3"
golangci-lint = "2.1.6"
goreleaser = "1.24.0"
kind = "0.31.0"
kubectl = "1.35.0"
shellcheck = "0.11.0"
syft = "1.0.1"

View File

@@ -1,3 +0,0 @@
# Kured Community Code of Conduct
Kured follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

View File

@@ -1,357 +0,0 @@
# Developing `kured`
We love contributions to `kured`, no matter if you are [helping out on
Slack][slack], reporting or triaging [issues][issues] or contributing code
to `kured`.
In any case, it will make sense to familiarise yourself with the main
[documentation][documentation] to understand the different features and
options, which is helpful for testing. The "building" section in
particular makes sense if you are planning to contribute code.
[slack]: https://github.com/kubereboot/kured/blob/main/README.md#getting-help
[issues]: https://github.com/kubereboot/kured/issues
[documentation]: https://kured.dev/docs
## Prepare your environment
### Your IDE
![JetBrains logo](https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.png)
The core team has access to Goland from [JetBrains][JetBrains]. Huge thanks to them for their sponsorship!
You can use the IDE you prefer. Don't add IDE-specific entries to this repository's .gitignore; put them in your global .gitignore instead.
[JetBrains]: https://www.jetbrains.com/
### Basic binaries required
Your system needs at least the following binaries installed:
- make
- sed
- find
- bash (command, echo)
- docker (for docker buildx)
- go (see the required version in go.mod)
- mise (to manage developer tools. See [mise installation instructions](https://mise.jdx.dev/installing-mise.html).)
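A quick sanity check (a sketch; it only verifies the binaries are on your PATH, not their versions):
```sh
# report any missing base dependency
for bin in make sed find bash docker go mise; do
  command -v "$bin" >/dev/null || echo "missing: $bin"
done
```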
### Fetch the additional binaries required
Please run `make install-tools` once on a fresh repository clone to download necessary developer tools.
Installed tools are listed in [.mise directory](.mise/config.toml).
### Configure your git for the "Certificate of Origin"
By contributing to this project, you agree to the Developer Certificate of
Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution.
We require all commits to be signed. By signing off with your signature, you
certify that you wrote the patch or otherwise have the right to contribute the
material by the rules of the [DCO](DCO):
`Signed-off-by: Jane Doe <jane.doe@example.com>`
The signature must contain your real name
(sorry, no pseudonyms or anonymous contributions).
If your `user.name` and `user.email` are configured in your Git config,
you can sign your commit automatically with `git commit -s`.
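For example (a minimal sketch, with a placeholder identity and commit message):
```sh
# one-time identity setup, used for the Signed-off-by trailer
git config --global user.name "Jane Doe"
git config --global user.email "jane.doe@example.com"
# -s appends "Signed-off-by: Jane Doe <jane.doe@example.com>" to the message
git commit -s -m "Describe your change here"
```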
## Get to know Kured repositories
All Kured repositories are kept under <https://github.com/kubereboot>. To find the code and work on the individual pieces that make up Kured, here is our overview:
| Repositories | Contents |
|-----------------------------------------|---------------------------|
| <https://github.com/kubereboot/kured> | Kured operator itself |
| <https://github.com/kubereboot/charts> | Helm chart |
| <https://github.com/kubereboot/website> | website and documentation |
We use GitHub Actions in all our repositories.
### Charts repo structure highlights
- We use GitHub Actions to do the chart testing. Only linting/installation happens, no e2e test is done.
- [charts/kured](https://github.com/kubereboot/charts/tree/main/charts/kured) is the place to contribute changes to the chart. Please bump [Chart.yaml](https://github.com/kubereboot/charts/blob/main/charts/kured/Chart.yaml) at each change according to semver.
### Kured repo structure highlights
Kured's main code can be found in the [`cmd`](cmd) and [`pkg`](pkg) directories.
Keep in mind we always want to guarantee that `kured` works for the previous, current, and next
minor version of kubernetes according to `client-go` and `kubectl` dependencies in use.
Our e2e tests are in the [`tests`](tests) directory. These are deep tests using our manifests with different params, on all supported k8s versions of a release.
They are expensive but allow us to catch many issues quickly. If you want to ensure your scenario works, add an e2e test for it! Those e2e tests are encouraged by the maintainer team (See below).
We also have other tests:
- golangci-lint, shellcheck
- a security check against our base image (alpine)
All these tests are run on every PR/tagged release. See [.github/workflows](.github/workflows) for more details.
We use [GoReleaser to build](.goreleaser.yml).
## Regular development activities / maintenance
### Updating k8s support
At each new major release of kubernetes, we update our dependencies.
Beware that whenever we want to update e.g. the `kubectl` or `client-go` dependencies, some other impactful changes might be necessary too.
(RBAC, drain behaviour changes, ...)
A few examples of what it took to support:
- Kubernetes 1.10 <https://github.com/kubereboot/kured/commit/b3f9ddf> + <https://github.com/kubereboot/kured/commit/bc3f28d> + <https://github.com/kubereboot/kured/commit/908998a> + <https://github.com/kubereboot/kured/commit/efbb0c3> + <https://github.com/kubereboot/kured/commit/5731b98>
- Kubernetes 1.14 <https://github.com/kubereboot/kured/pull/75>
- Kubernetes 1.34 <https://github.com/kubereboot/kured/commit/6ab853dd711ee264663184976ae492a20b657b0a>
Search the git log for inspiration for your cases.
In general, the following activities have to happen:
- Bump kind and its images (see below)
- `go get k8s.io/kubectl@v0.{version}`
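A sketch of such a bump (the target version below is illustrative; `v0.{x}` client libraries track kubernetes `1.{x}`):
```sh
# bump the kubernetes client libraries to the new minor (illustrative version)
go get k8s.io/client-go@v0.23.0
go get k8s.io/kubectl@v0.23.0
go mod tidy
# run the short tests to catch obvious breakage early
make test
```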
### Bump kind images support
Go to `.github/workflows` and update the new k8s images. For that:
- `cp .github/kind-cluster-current.yaml .github/kind-cluster-previous.yaml`
- `cp .github/kind-cluster-next.yaml .github/kind-cluster-current.yaml`
- Then edit `.github/kind-cluster-next.yaml` to point to the new version.
This keeps the full test matrix updated (both the CI and the test code).
Once your code passes all tests, update the support matrix in
the [installation docs](https://kured.dev/docs/installation/).
Beware that sometimes you also need to update the kind version itself: grep the [.github/workflows](.github/workflows) directory for the pinned kind version.
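Putting the rotation together (the image tags and new version below are illustrative):
```sh
# rotate the kind cluster configs: current -> previous, next -> current
cp .github/kind-cluster-current.yaml .github/kind-cluster-previous.yaml
cp .github/kind-cluster-next.yaml .github/kind-cluster-current.yaml
# point the "next" config at the new kindest/node image
sed -i 's|kindest/node:v1.35.0|kindest/node:v1.36.0|g' .github/kind-cluster-next.yaml
# locate where the kind binary version itself is pinned in CI
grep -rn 'kind' .github/workflows
```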
### Updating other dependencies
Dependabot proposes changes in our `go.mod`/`go.sum`.
CI testing covers some of those changes, but not all.
Please make sure to test those not covered by CI (mostly the integration
with other tools) manually before merging.
In all cases, review dependabot changes: treat every change as a possible supply-chain
attack vector, and evaluate the trust and risks of the proposed update.
### Review periodic jobs
We run periodic jobs (see also Automated testing section of this documentation).
Those should be monitored for failures.
If a failure happens in periodic testing, something terribly wrong must have happened
(or GitHub is failing at the creation of a kind cluster). Please monitor those
failures carefully.
## Testing kured
If you have developed anything (or just want to take kured for a spin!), run the following tests.
As they will run in CI, we will quickly catch if you did not test before submitting your PR.
### Linting
We use [`golangci-lint`](https://golangci-lint.run/) for Go code linting.
To run lint checks locally:
```bash
make lint
```
### Quick Golang code testing
Please run `make test` to run only the basic tests. It gives a good
idea of the code behaviour.
### Functional testing
For functional testing, the maintainer team is using `minikube` or `kind` (explained below), but also encourages you to test kured on your own cluster(s).
#### Testing on your own cluster
This will allow the community to catch issues that might not have been tested in our CI, like integration with other tools, or your specific use case.
To test kured on your own cluster, make sure you pass the right `image`, update the `period` and `reboot-days` (so you get immediate results), and update any other flags for your cases.
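For example, the same tweak our CI applies can be done by hand before deploying (a sketch; the 30s period is illustrative):
```sh
# uncomment the period flag in the manifest and shorten it for quick feedback
sed -i 's/#\(.*\)--period=1h/\1--period=30s/g' kured-ds.yaml
kubectl apply -f kured-rbac.yaml
kubectl apply -f kured-ds.yaml
```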
Then login to a node and run:
```console
sudo touch /var/run/reboot-required
```
Then tell us about everything that went well or went wrong in Slack.
#### Testing with `minikube`
A test-run with `minikube` could look like this:
```cli
# start minikube
minikube start --driver=kvm2 --kubernetes-version <k8s-release>
# build kured image and publish to registry accessible by minikube
make image minikube-publish
# edit kured-ds.yaml to
# - point to new image
# - change e.g. period and reboot-days option for immediate results
minikube kubectl -- apply -f kured-rbac.yaml
minikube kubectl -- apply -f kured-ds.yaml
minikube kubectl -- logs daemonset.apps/kured -n kube-system -f
# In separate terminal
minikube ssh
sudo touch /var/run/reboot-required
minikube logs -f
```
Now check for the 'Commanding reboot' message and minikube going down.
Unfortunately, as of today, you are going to run into
<https://github.com/kubernetes/minikube/issues/2874>. This means that
minikube won't come back easily. You will need to start minikube again.
Then you can check for the lock release.
#### Testing with `kind` "The hard way"
A test-run with `kind` could look like this:
```cli
# create kind cluster
kind create cluster --config .github/kind-cluster-<k8s-version>.yaml
# create reboot required files on pre-defined kind nodes
./tests/kind/create-reboot-sentinels.sh
# check if reboot is working fine
./tests/kind/follow-coordinated-reboot.sh
```
### Testing with `kind` "The easy way"
You can automate the test with `kind` by using the same code as the CI.
```cli
# Build kured:dev image, build manifests, and run the "long" go tests
make e2e-test
```
You can alter the test behaviour by passing arguments to this command.
A few examples are given below:
```shell
# Run only TestE2EWithSignal test for the kubernetes version named "current" (see kind file)
make e2e-test ARGS="-run ^TestE2EWithSignal/current"
# Run all tests but make sure to extend the timeout, for slower machines.
make e2e-test ARGS="-timeout 1200s"
```
## Introducing new features
When you introduce a new feature, the kured team expects you to have tested (see above!)
your change thoroughly. If possible, include all the necessary testing in your change.
If your change is user facing (a change to kured's flags, for example),
please expose your new feature in our default manifest (`kured-ds.yaml`)
as a comment.
Our release manifests and helm charts are our stable interfaces.
Any user facing changes will therefore have to wait for a release before being
exposed to our users.
This also means that when you expose a new feature, you should create another PR
for your changes in <https://github.com/kubereboot/charts> to make your feature
available at the next kured version for helm users.
In the charts PR, you can directly bump the `appVersion` to the next minor version
(you are introducing a new feature, which requires a bump of the minor number;
for example, if the current `appVersion` is `1.6.x`, update your `appVersion`
to `1.7.0`). It allows us to have an easy view of what lands in each release.
Do not hesitate to increase the test coverage for your feature, anywhere from unit testing
to full functional testing (even using helm charts).
The kured team is small, so we will most likely refuse any feature that adds maintenance burden.
## Introducing changes in the helm chart
When you change the helm chart, remember to bump its version according to semver.
Changes to defaults are frowned upon unless absolutely necessary.
## Introducing new tests / increasing test coverage
In contrast to features, we welcome ALL changes that increase our stability and test coverage.
See also our GitHub issues with the label [`testing`](https://github.com/kubereboot/kured/labels/testing).
## Publishing a new kured release
### Double-check the latest kubernetes patch version
Ensure you have used the latest patch version in the tree.
Check the documentation "Updating k8s support" if the minor version was not yet applied.
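A quick way to see which patch versions the tree currently pins (a sketch):
```sh
# list the kindest/node image tags used by the test cluster configs
grep -rn 'kindest/node' .github/
```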
### Update the manifests with the new version
```sh
export VERSION=1.20.0
make DH_ORG="kubereboot" VERSION="${VERSION}" manifest
```
Create a commit updating the manifest with future image [like this one](https://github.com/kubereboot/kured/commit/58091f6145771f426b4b9e012a43a9c847af2560).
### Create the combined manifest for the new release
Now create the `kured-<new version>-combined.yaml` for e.g. `1.20.0`:
```sh
export VERSION=1.20.0
export MANIFEST="kured-$VERSION-combined.yaml"
make DH_ORG="kubereboot" VERSION="${VERSION}" manifest # just to be safe
cat kured-rbac.yaml > "$MANIFEST"
cat kured-ds.yaml >> "$MANIFEST"
```
### Create the new version tag on the repo (optional, can also be done directly in GH web interface)
Tag the previously created commit with the future release version.
The GitHub Actions workflow will push the new image to the registry.
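From the command line, this is roughly (the version is illustrative; tags follow the plain `<major>.<minor>.<patch>` scheme used above):
```sh
export VERSION=1.20.0
git tag "${VERSION}"
git push origin "${VERSION}"
```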
### Publish new version release artifacts
Now you can head to the GitHub UI for releases and draft a new
release. Choose the new version number as the tag.
Click to generate the release notes.
Use "Kured <new version>" as the name.
Edit the generated text.
Please describe what's new and noteworthy in the release notes, list the PRs
that landed and give a shout-out to everyone who contributed.
Please also note down which releases the upcoming `kured` release was
tested on and what it supports. (Check old release notes if you're unsure.)
Before publishing the release, upload the YAML manifest file
(`kured-<new version>-combined.yaml`).
Then publish the release and set it as the latest release.
### Prepare Helm chart
Create a commit to [bump the chart and kured version like this one](https://github.com/kubereboot/charts/commit/e0191d91c21db8338be8cbe56f8991a557048110).
### Prepare Documentation
Ensure the [compatibility matrix](https://kured.dev/docs/installation/) is updated to the new version you want to release.

36
DCO
View File

@@ -1,36 +0,0 @@
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.

View File

@@ -1 +1,235 @@
This file was moved to [CONTRIBUTING.md](CONTRIBUTING.md).
# Developing `kured`
We love contributions to `kured`, no matter if you are [helping out on
Slack][slack], reporting or triaging [issues][issues] or contributing code
to `kured`.
In any case, it will make sense to familiarise yourself with the main
[README][readme] to understand the different features and options, which is
helpful for testing. The "building" section in particular makes sense if
you are planning to contribute code.
[slack]: README.md#getting-help
[issues]: https://github.com/weaveworks/kured/issues
[readme]: README.md
## Regular development activities
### Updating k8s support
Whenever we want to update e.g. the `kubectl` or `client-go` dependencies,
some RBAC changes might be necessary too.
This is what it took to support Kubernetes 1.14:
<https://github.com/weaveworks/kured/pull/75>
The process can be more involved than that, as can be seen in
<https://github.com/weaveworks/kured/commits/support-k8s-1.10>
Please update our .github/workflows with the new k8s images, starting
with the creation of a .github/kind-cluster-<version>.yaml (see the sketch
below), then updating our workflows with the new versions.
Once you have updated everything, make sure you update the support matrix in
the main [README][readme] as well.
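A sketch of that first step (using a hypothetical 1.24 bump, assuming a config for the previous version exists):
```console
# Create the new kind cluster config from the previous one and
# point it at the new kindest/node image.
cp .github/kind-cluster-1.23.yaml .github/kind-cluster-1.24.yaml
sed -i 's/v1\.23\.[0-9]*/v1.24.0/g' .github/kind-cluster-1.24.yaml
```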
### Updating other dependencies
Dependabot proposes changes in our go.mod/go.sum.
Some of those changes are covered by CI testing, some are not.
Please make sure to test those not covered by CI (mostly the integration
with other tools) manually before merging.
### Review periodic jobs
We run periodic jobs (see also Automated testing section of this documentation).
Those should be monitored for failures.
If a failure happens in the periodics, something terribly wrong must have happened
(or GitHub is failing to create a kind cluster). Please monitor those
failures carefully.
### Introducing new features
When you introduce a new feature, the kured team expects you to have tested
your change thoroughly. If possible, include all the necessary testing in your change.
If your change involves a user-facing change (a change in kured's flags, for example),
please expose your new feature in our default manifest (`kured-ds.yaml`),
as a comment.
Do not update the helm chart directly.
Helm charts and our release manifests (see below) are our stable interfaces.
Any user-facing changes will therefore have to wait for a while before being
exposed to our users.
This also means that when you expose a new feature, you should create another PR
for your changes in `charts/` to make your feature available for our next kured version.
In this change, you can directly bump the appVersion to the next minor version
(for example, if the current appVersion is 1.6.x, make sure you update your appVersion
to 1.7.0). This allows us to have an easy view of what we land in each release.
Do not hesitate to increase the test coverage for your feature, from unit
testing to full functional testing (even using helm charts).
### Increasing test coverage
We welcome any change that increases our test coverage.
See also our GitHub issues with the label `testing`.
### Updating helm charts
Helm charts are continuously published. Any change in `charts/` will be immediately
pushed to production.
## Automated testing
Our CI is covered by github actions.
You can see their contents in .github/workflows.
We currently run:
- go tests and lint
- shellcheck
- a check for dead links in our docs
- a security check against our base image (alpine)
- a deep functional test using our manifests on all supported k8s versions
- basic deployment using our helm chart on any chart change
Changes in helm charts are not functionally tested on PRs. We assume that
the PRs to implement the feature are properly tested by our users and
contributors before merge.
To test your code manually, follow the section Manual testing.
## Manual (release) testing
Before `kured` is released, we want to make sure it still works fine on the
previous, current and next minor version of Kubernetes (with respect to the
`client-go` & `kubectl` dependencies in use). For local testing e.g.
`minikube` or `kind` can be sufficient. This will allow you to catch issues
that might not have been tested in our CI, like integration with other tools,
or your specific use case.
Deploy kured in your test scenario; make sure you pass the right `image`
and update e.g. the `period` and `reboot-days` options so that you get immediate
results if you log in to a node and run:
```console
sudo touch /var/run/reboot-required
```
### Example of golang testing
Please run `make test`. You should have golint installed.
### Example of testing with `minikube`
A test-run with `minikube` could look like this:
```console
# start minikube
minikube start --vm-driver kvm2 --kubernetes-version <k8s-release>
# build kured image and publish to registry accessible by minikube
make image minikube-publish
# edit kured-ds.yaml to
# - point to new image
# - change e.g. period and reboot-days option for immediate results
minikube kubectl -- apply -f kured-rbac.yaml
minikube kubectl -- apply -f kured-ds.yaml
minikube kubectl -- logs daemonset.apps/kured -n kube-system -f
# Alternatively use helm to install the chart
# edit values-local.yaml to change any chart parameters
helm install kured ./charts/kured --namespace kube-system -f ./charts/kured/values.minikube.yaml
# In separate terminal
minikube ssh
sudo touch /var/run/reboot-required
minikube logs -f
```
Now check for the 'Commanding reboot' message and minikube going down.
Unfortunately, as of today, you are going to run into
<https://github.com/kubernetes/minikube/issues/2874>. This means that
minikube won't come back easily; you will need to start minikube again.
Then you can check for the lock release.
If all the tests ran well, kured maintainers can reach out to the Weaveworks
team to get an upcoming `kured` release tested in the Dev environment for
real life testing.
### Example of testing with `kind`
A test-run with `kind` could look like this:
```console
# create kind cluster
kind create cluster --config .github/kind-cluster-<k8s-version>.yaml
# create reboot required files on pre-defined kind nodes
./tests/kind/create-reboot-sentinels.sh
# check if reboot is working fine
./tests/kind/follow-coordinated-reboot.sh
```
## Publishing a new kured release
### Prepare Documentation
Check that `README.md` has an updated compatibility matrix and that the
url in the `kubectl` incantation (under "Installation") is updated to the
new version you want to release.
### Create a tag on the repo
Before going further, we should freeze the code for a release by
tagging it. The GitHub Action should start a new job and push
the new image to the registry.
### Create the combined manifest
Now create the `kured-<release>-dockerhub.yaml` for e.g. `1.3.0`:
```sh
VERSION=1.3.0
MANIFEST="kured-$VERSION-dockerhub.yaml"
make DH_ORG="weaveworks" VERSION="${VERSION}" manifest
cat kured-rbac.yaml > "$MANIFEST"
cat kured-ds.yaml >> "$MANIFEST"
```
### Publish release artifacts
Now you can head to the GitHub UI, use the version number as the tag, and upload the
`kured-<release>-dockerhub.yaml` file.
Please describe what's new and noteworthy in the release notes, list the PRs
that landed and give a shout-out to everyone who contributed.
Please also note down on which releases the upcoming `kured` release was
tested on. (Check old release notes if you're unsure.)
### Update the Helm chart
You can automatically bump the helm chart's application version
with the latest image tag by running:
```sh
make DH_ORG="weaveworks" VERSION="1.3.0" helm-chart
```
A change in the helm chart requires a bump of the `version`
in `charts/kured/Chart.yaml` (following the versioning rules).
Update it, and issue a PR. Upon merge, that PR will automatically
publish the chart to the gh-pages branch.
Any open helm-chart PRs that were on hold until the chart was updated with the
new kured version can be merged now (unless the contributor needs to rebase).

View File

@@ -1,25 +0,0 @@
FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS bin
ARG TARGETOS
ARG TARGETARCH
ARG TARGETVARIANT
COPY dist/ /dist
RUN set -ex \
&& case "${TARGETARCH}" in \
amd64) \
SUFFIX="_v1" \
;; \
arm) \
SUFFIX="_${TARGETVARIANT:1}" \
;; \
*) \
SUFFIX="" \
;; \
esac \
&& cp /dist/kured_${TARGETOS}_${TARGETARCH}${SUFFIX}/kured /dist/kured;
FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659
RUN apk update --no-cache && apk upgrade --no-cache && apk add --no-cache ca-certificates tzdata
COPY --from=bin /dist/kured /usr/bin/kured
ENTRYPOINT ["/usr/bin/kured"]

View File

@@ -1,112 +0,0 @@
# Project Governance
- [Values](#values)
- [Maintainers](#maintainers)
- [Becoming a Maintainer](#becoming-a-maintainer)
- [Meetings](#meetings)
- [Code of Conduct Enforcement](#code-of-conduct)
- [Voting](#voting)
## Values
The Kured project and its leadership embrace the following values:
- Openness: Communication and decision-making happens in the open and is discoverable for future
reference. As much as possible, all discussions and work take place in public
forums and open repositories.
- Fairness: All stakeholders have the opportunity to provide feedback and submit
contributions, which will be considered on their merits.
- Community over Product or Company: Sustaining and growing our community takes
priority over shipping code or sponsors' organizational goals. Each
contributor participates in the project as an individual.
- Inclusivity: We innovate through different perspectives and skill sets, which
can only be accomplished in a welcoming and respectful environment.
- Participation: Responsibilities within the project are earned through
participation, and there is a clear path up the contributor ladder into leadership
positions.
- Consensus: Whether or not wider input is required, the Kured community believes that
the best decisions are reached through Consensus
<https://en.wikipedia.org/wiki/Consensus_decision-making>.
## Maintainers
Kured Maintainers have write access to the [project GitHub
organisation](https://github.com/kubereboot). They can merge their own patches or patches
from others. The current maintainers can be found in [MAINTAINERS][maintainers-file].
Maintainers collectively manage the project's resources and contributors.
This privilege is granted with some expectation of responsibility: maintainers
are people who care about the Kured project and want to help it grow and
improve. A maintainer is not just someone who can make changes, but someone who
has demonstrated their ability to collaborate with the team, get the most
knowledgeable people to review code and docs, contribute high-quality code, and
follow through to fix issues (in code or tests).
A maintainer is a contributor to the project's success and a citizen helping
the project succeed.
## Becoming a Maintainer
To become a Maintainer you need to demonstrate the following:
- commitment to the project:
- participate in discussions, contributions, code and documentation reviews
for 3 months or more and participate in Slack discussions and meetings
if possible,
- perform reviews for 5 non-trivial pull requests,
- contribute 5 non-trivial pull requests and have them merged,
- ability to write quality code and/or documentation,
- ability to collaborate with the team,
- understanding of how the team works (policies, processes for testing and code review, etc),
- understanding of the project's code base and coding and documentation style.
We realise that everybody brings different abilities and qualities to the team;
that's why we are willing to change the rules somewhat depending on the circumstances.
A new Maintainer can apply by proposing a PR to the [MAINTAINERS
file][maintainers-file]. A simple majority vote of existing Maintainers
approves the application.
Maintainers who are selected will be granted the necessary GitHub rights,
and invited to the [private maintainer mailing list][private-list].
## Meetings
Time zones permitting, Maintainers are expected to participate in the public
developer meeting, details can be found [here][meeting-agenda].
Maintainers will also have closed meetings in order to discuss security reports
or Code of Conduct violations. Such meetings should be scheduled by any
Maintainer on receipt of a security issue or CoC report. All current Maintainers
must be invited to such closed meetings, except for any Maintainer who is
accused of a CoC violation.
## Code of Conduct
[Code of Conduct](./CODE_OF_CONDUCT.md) violations by community members will
be discussed and resolved on the [private Maintainer mailing list][private-list].
If the reported CoC violator is a Maintainer, the Maintainers will instead
designate two Maintainers to work with CNCF staff in resolving the report.
## Voting
While most business in Kured is conducted by "lazy consensus", periodically
the Maintainers may need to vote on specific actions or changes.
A vote can be taken in [kured issues labeled 'decision'][decision-issues] or
[the private Maintainer mailing list][private-list] for security or conduct
matters. Votes may also be taken at [the developer meeting][meeting-agenda].
Any Maintainer may demand a vote be taken.
Most votes require a simple majority of all Maintainers to succeed. Maintainers
can be removed by a 2/3 majority vote of all Maintainers, and changes to this
Governance require a 2/3 vote of all Maintainers.
[maintainers-file]: ./MAINTAINERS
[private-list]: cncf-kured-maintainers@lists.cncf.io
[meeting-agenda]: https://docs.google.com/document/d/1AWT8YDdqZY-Se6Y1oAlwtujWLVpNVK2M_F_Vfqw06aI/edit
[decision-issues]: https://github.com/kubereboot/kured/labels/decision

View File

@@ -1,12 +1,5 @@
In alphabetical order:
Dharsan Baskar <git@dharsanb.com> (@dharsanb)
Christian Kotzbauer <christian.kotzbauer@gmail.com> (@ckotzbauer)
Daniel Holbach <daniel@weave.works> (@dholbach)
Hidde Beydals <hidde@weave.works> (@hiddeco)
Jean-Phillipe Evrard <jean-philippe.evrard@suse.com> (@evrardjp)
Jack Francis <jackfrancis@gmail.com> (@jackfrancis)
Jean-Philippe Evrard <open-source@a.spamming.party> (@evrardjp)
Retired maintainers:
- Daniel Holbach
- Christian Hopf
Thank you for your involvement, and let us not say "farewell" ...

View File

@@ -1,72 +1,55 @@
.DEFAULT: all
.PHONY: all clean image minikube-publish manifest test kured-all lint
.PHONY: all clean image publish-image minikube-publish manifest helm-chart test tests
DH_ORG ?= kubereboot
VERSION=$(shell git rev-parse --short HEAD)
DH_ORG=weaveworks
VERSION=$(shell git symbolic-ref --short HEAD)-$(shell git rev-parse --short HEAD)
SUDO=$(shell docker info >/dev/null 2>&1 || echo "sudo -E")
all: image
.PHONY: install-tools
install-tools:
command -v mise 2>&1 || { echo "please install mise to continue" >&2; exit 127; }
mise install
clean:
rm -rf ./dist
rm -f cmd/kured/kured
rm -rf ./build
kured:
goreleaser build --clean --single-target --snapshot
godeps=$(shell go list -f '{{join .Deps "\n"}}' $1 | grep -v /vendor/ | xargs go list -f '{{if not .Standard}}{{ $$dep := . }}{{range .GoFiles}}{{$$dep.Dir}}/{{.}} {{end}}{{end}}')
kured-all:
goreleaser build --clean --snapshot
DEPS=$(call godeps,./cmd/kured)
kured-release-tag:
goreleaser release --clean
cmd/kured/kured: $(DEPS)
cmd/kured/kured: cmd/kured/*.go
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "-X main.version=$(VERSION)" -o $@ cmd/kured/*.go
kured-release-snapshot:
goreleaser release --clean --snapshot
build/.image.done: cmd/kured/Dockerfile cmd/kured/kured
mkdir -p build
cp $^ build
$(SUDO) docker build -t docker.io/$(DH_ORG)/kured -f build/Dockerfile ./build
$(SUDO) docker tag docker.io/$(DH_ORG)/kured docker.io/$(DH_ORG)/kured:$(VERSION)
$(SUDO) docker tag docker.io/$(DH_ORG)/kured ghcr.io/$(DH_ORG)/kured:$(VERSION)
touch $@
image: kured
$(SUDO) docker buildx build --no-cache --load -t ghcr.io/$(DH_ORG)/kured:$(VERSION) .
image: build/.image.done
dev-image: image
$(SUDO) docker tag ghcr.io/$(DH_ORG)/kured:$(VERSION) kured:dev
dev-manifest:
# basic e2e scenario
sed -e "s#image: ghcr.io/.*kured.*#image: kured:dev#g" -e 's/#\(.*\)--period=1h/\1--period=20s/g' kured-ds.yaml > tests/kind/testfiles/kured-ds.yaml
# signal e2e scenario
sed -e "s#image: ghcr.io/.*kured.*#image: kured:dev#g" -e 's/#\(.*\)--period=1h/\1--period=20s/g' kured-ds-signal.yaml > tests/kind/testfiles/kured-ds-signal.yaml
# concurrency e2e command scenario
sed -e "s#image: ghcr.io/.*kured.*#image: kured:dev#g" -e 's/#\(.*\)--period=1h/\1--period=20s/g' -e 's/#\(.*\)--concurrency=1/\1--concurrency=2/g' kured-ds.yaml > tests/kind/testfiles/kured-ds-concurrent-command.yaml
# concurrency e2e signal scenario
sed -e "s#image: ghcr.io/.*kured.*#image: kured:dev#g" -e 's/#\(.*\)--period=1h/\1--period=20s/g' -e 's/#\(.*\)--concurrency=1/\1--concurrency=2/g' kured-ds-signal.yaml > tests/kind/testfiles/kured-ds-concurrent-signal.yaml
# pod blocker e2e signal scenario
sed -e "s#image: ghcr.io/.*kured.*#image: kured:dev#g" -e 's/#\(.*\)--period=1h/\1--period=20s/g' -e 's/#\(.*\)--blocking-pod-selector=name=temperamental/\1--blocking-pod-selector=app=blocker/g' kured-ds-signal.yaml > tests/kind/testfiles/kured-ds-podblocker.yaml
e2e-test: dev-manifest dev-image
echo "Running ALL go tests"
go test -count=1 -v --parallel 4 ./... $(ARGS)
publish-image: image
$(SUDO) docker push docker.io/$(DH_ORG)/kured:$(VERSION)
$(SUDO) docker push ghcr.io/$(DH_ORG)/kured:$(VERSION)
minikube-publish: image
$(SUDO) docker save ghcr.io/$(DH_ORG)/kured | (eval $$(minikube docker-env) && docker load)
$(SUDO) docker save docker.io/$(DH_ORG)/kured | (eval $$(minikube docker-env) && docker load)
manifest:
sed -i "s#image: ghcr.io/.*kured.*#image: ghcr.io/$(DH_ORG)/kured:$(VERSION)#g" kured-ds.yaml
sed -i "s#image: ghcr.io/.*kured.*#image: ghcr.io/$(DH_ORG)/kured:$(VERSION)#g" kured-ds-signal.yaml
sed -i "s#image: docker.io/.*kured.*#image: docker.io/$(DH_ORG)/kured:$(VERSION)#g" kured-ds.yaml
echo "Please generate combined manifest if necessary"
test: lint
@echo "Running short go tests"
go test -test.short -json ./... > test.json
helm-chart:
sed -i "s#repository:.*/kured#repository: $(DH_ORG)/kured#g" charts/kured/values.yaml
sed -i "s#appVersion:.*#appVersion: \"$(VERSION)\"#g" charts/kured/Chart.yaml
sed -i "s#\`[0-9]*\.[0-9]*\.[0-9]*\`#\`$(VERSION)\`#g" charts/kured/README.md
echo "Please bump version in charts/kured/Chart.yaml"
lint:
@echo "Running shellcheck"
find . -name '*.sh' | xargs -n1 shellcheck
@echo "Running golangci-lint..."
golangci-lint run ./...
lint-docs:
@echo "Running lychee"
mise x lychee@latest -- lychee --verbose --no-progress '*.md' '*.yaml' '*/*/*.go' --exclude-link-local
test: tests
echo "Running go tests"
go test ./...
echo "Running golint on pkg"
golint ./pkg/...
echo "Running golint on cmd"
golint ./cmd/...

397
README.md
View File

@@ -1,18 +1,29 @@
# kured - Kubernetes Reboot Daemon
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/kured)](https://artifacthub.io/packages/helm/kured/kured)
[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fkubereboot%2Fkured.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fkubereboot%2Fkured?ref=badge_shield)
[![CLOMonitor](https://img.shields.io/endpoint?url=https://clomonitor.io/api/projects/cncf/kured/badge)](https://clomonitor.io/projects/cncf/kured)
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8867/badge)](https://www.bestpractices.dev/projects/8867)
<img src="https://github.com/weaveworks/kured/raw/main/img/logo.png" align="right"/>
<img src="https://github.com/kubereboot/website/raw/main/static/img/kured.png" alt="kured logo" width="200" align="right"/>
- [kured - Kubernetes Reboot Daemon](#kured---kubernetes-reboot-daemon)
- [Introduction](#introduction)
- [Documentation](#documentation)
- [Getting Help](#getting-help)
- [Trademarks](#trademarks)
- [License](#license)
- [Introduction](#introduction)
- [Kubernetes & OS Compatibility](#kubernetes--os-compatibility)
- [Installation](#installation)
- [Configuration](#configuration)
- [Reboot Sentinel File & Period](#reboot-sentinel-file--period)
- [Setting a schedule](#setting-a-schedule)
- [Blocking Reboots via Alerts](#blocking-reboots-via-alerts)
- [Blocking Reboots via Pods](#blocking-reboots-via-pods)
- [Prometheus Metrics](#prometheus-metrics)
- [Notifications](#notifications)
- [Overriding Lock Configuration](#overriding-lock-configuration)
- [Operation](#operation)
- [Testing](#testing)
- [Disabling Reboots](#disabling-reboots)
- [Manual Unlock](#manual-unlock)
- [Automatic Unlock](#automatic-unlock)
- [Delaying Lock Release](#delaying-lock-release)
- [Building](#building)
- [Frequently Asked/Anticipated Questions](#frequently-askedanticipated-questions)
- [Why is there no `latest` tag on Docker Hub?](#why-is-there-no-latest-tag-on-docker-hub)
- [Getting Help](#getting-help)
## Introduction
@@ -20,48 +31,348 @@ Kured (KUbernetes REboot Daemon) is a Kubernetes daemonset that
performs safe automatic node reboots when the need to do so is
indicated by the package management system of the underlying OS.
- Watches for the presence of a reboot sentinel file e.g. `/var/run/reboot-required`
* Watches for the presence of a reboot sentinel file e.g. `/var/run/reboot-required`
or the successful run of a sentinel command.
- Utilises a lock in the API server to ensure only one node reboots at
* Utilises a lock in the API server to ensure only one node reboots at
a time
- Optionally defers reboots in the presence of active Prometheus alerts or selected pods
- Cordons & drains worker nodes before reboot, uncordoning them after
* Optionally defers reboots in the presence of active Prometheus alerts or selected pods
* Cordons & drains worker nodes before reboot, uncordoning them after
## Documentation
## Kubernetes & OS Compatibility
Find all our docs on <https://kured.dev>:
The daemon image contains versions of `k8s.io/client-go` and
`k8s.io/kubectl` (the binary of `kubectl` in older releases) for the purposes of
maintaining the lock and draining worker nodes. Kubernetes aims to provide
forwards and backwards compatibility of one minor version between client and
server:
- [All Kured Documentation](https://kured.dev/docs/)
- [Installing Kured](https://kured.dev/docs/installation/)
- [Configuring Kured](https://kured.dev/docs/configuration/)
- [Operating Kured](https://kured.dev/docs/operation/)
- [Developing Kured](https://kured.dev/docs/development/)
| kured | kubectl | k8s.io/client-go | k8s.io/apimachinery | expected kubernetes compatibility |
|-------|---------|------------------|---------------------|-----------------------------------|
| main | 1.22.4 | v0.22.4 | v0.22.4 | 1.21.x, 1.22.x, 1.23.x |
| 1.9.1 | 1.22.4 | v0.22.4 | v0.22.4 | 1.21.x, 1.22.x, 1.23.x |
| 1.8.1 | 1.21.4 | v0.21.4 | v0.21.4 | 1.20.x, 1.21.x, 1.22.x |
| 1.7.0 | 1.20.5 | v0.20.5 | v0.20.5 | 1.19.x, 1.20.x, 1.21.x |
| 1.6.1 | 1.19.4 | v0.19.4 | v0.19.4 | 1.18.x, 1.19.x, 1.20.x |
| 1.5.1 | 1.18.8 | v0.18.8 | v0.18.8 | 1.17.x, 1.18.x, 1.19.x |
| 1.4.4 | 1.17.7 | v0.17.0 | v0.17.0 | 1.16.x, 1.17.x, 1.18.x |
| 1.3.0 | 1.15.10 | v12.0.0 | release-1.15 | 1.15.x, 1.16.x, 1.17.x |
| 1.2.0 | 1.13.6 | v10.0.0 | release-1.13 | 1.12.x, 1.13.x, 1.14.x |
| 1.1.0 | 1.12.1 | v9.0.0 | release-1.12 | 1.11.x, 1.12.x, 1.13.x |
| 1.0.0 | 1.7.6 | v4.0.0 | release-1.7 | 1.6.x, 1.7.x, 1.8.x |
And there's much more!
See the [release notes](https://github.com/weaveworks/kured/releases)
for specific version compatibility information, including which
combinations have been formally tested.
Versions >=1.1.0 enter the host mount namespace to invoke
`systemctl reboot`, so should work on any systemd distribution.
## Installation
To obtain a default installation without Prometheus alerting interlock
or Slack notifications:
```console
latest=$(curl -s https://api.github.com/repos/weaveworks/kured/releases | jq -r .[0].tag_name)
kubectl apply -f "https://github.com/weaveworks/kured/releases/download/$latest/kured-$latest-dockerhub.yaml"
```
If you want to customise the installation, download the manifest and
edit it in accordance with the following section before application.
## Configuration
The following arguments can be passed to kured via the daemonset pod template:
```console
Flags:
--alert-filter-regexp regexp.Regexp alert names to ignore when checking for active alerts
--alert-firing-only bool only consider firing alerts when checking for active alerts
--blocking-pod-selector stringArray label selector identifying pods whose presence should prevent reboots
--drain-grace-period int time in seconds given to each pod to terminate gracefully, if negative, the default value specified in the pod will be used (default: -1)
--skip-wait-for-delete-timeout int when seconds is greater than zero, skip waiting for the pods whose deletion timestamp is older than N seconds while draining a node (default: 0)
--ds-name string name of daemonset on which to place lock (default "kured")
--ds-namespace string namespace containing daemonset on which to place lock (default "kube-system")
--end-time string schedule reboot only before this time of day (default "23:59:59")
--force-reboot bool force a reboot even if the drain is still running (default: false)
--drain-timeout duration timeout after which the drain is aborted (default: 0, infinite time)
-h, --help help for kured
--lock-annotation string annotation in which to record locking node (default "weave.works/kured-node-lock")
--lock-release-delay duration hold lock after reboot by this duration (default: 0, disabled)
--lock-ttl duration expire lock annotation after this duration (default: 0, disabled)
--message-template-drain string message template used to notify about a node being drained (default "Draining node %s")
--message-template-reboot string message template used to notify about a node being rebooted (default "Rebooting node %s")
--notify-url url for reboot notifications (cannot use with --slack-hook-url flags)
--period duration reboot check period (default 1h0m0s)
--prefer-no-schedule-taint string Taint name applied during pending node reboot (to prevent receiving additional pods from other rebooting nodes). Disabled by default. Set e.g. to "weave.works/kured-node-reboot" to enable tainting.
--prometheus-url string Prometheus instance to probe for active alerts
--reboot-command string command to run when a reboot is required by the sentinel (default "/sbin/systemctl reboot")
--reboot-days strings schedule reboot on these days (default [su,mo,tu,we,th,fr,sa])
--reboot-delay duration add a delay after drain finishes but before the reboot command is issued (default 0, no time)
--reboot-sentinel string path to file whose existence signals need to reboot (default "/var/run/reboot-required")
--reboot-sentinel-command string command for which a successful run signals need to reboot (default ""). If non-empty, sentinel file will be ignored.
--slack-channel string slack channel for reboot notifications
--slack-hook-url string slack hook URL for reboot notifications [deprecated in favor of --notify-url]
--slack-username string slack username for reboot notifications (default "kured")
--start-time string schedule reboot only after this time of day (default "0:00")
--time-zone string use this timezone for schedule inputs (default "UTC")
--log-format string log format specified as text or json, defaults to "text"
```
### Reboot Sentinel File & Period
By default kured checks for the existence of
`/var/run/reboot-required` every sixty minutes; you can override these
values with `--reboot-sentinel` and `--period`. Each replica of the
daemon uses a random offset derived from the period on startup so that
nodes don't all contend for the lock simultaneously.
Alternatively, a reboot sentinel command can be used. If a reboot
sentinel command is used, the reboot sentinel file presence will be
ignored.
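For example (a hypothetical sketch; the path is illustrative, not a default):
```console
--reboot-sentinel-command="test -f /var/run/reboot-needed"
```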
### Setting a schedule
By default, kured will reboot any time it detects the sentinel, but this
may cause reboots during odd hours. While service disruption does not
normally occur, anything is possible and operators may want to restrict
reboots to predictable schedules. Use `--reboot-days`, `--start-time`,
`--end-time`, and `--time-zone` to set a schedule. For example, business
hours on the west coast USA can be specified with:
```console
--reboot-days=mon,tue,wed,thu,fri
--start-time=9am
--end-time=5pm
--time-zone=America/Los_Angeles
```
Times can be formatted in numerous ways, including `5pm`, `5:00pm`, `17:00`,
and `17`. `--time-zone` represents a Go `time.Location`, and can be `UTC`,
`Local`, or any entry in the standard Linux tz database.
Note that when using smaller time windows, you should consider shortening
the sentinel check period (`--period`).
### Blocking Reboots via Alerts
You may find it desirable to block automatic node reboots when there
are active alerts - you can do so by providing the URL of your
Prometheus server:
```console
--prometheus-url=http://prometheus.monitoring.svc.cluster.local
```
By default the presence of *any* active (pending or firing) alerts
will block reboots, however you can ignore specific alerts:
```console
--alert-filter-regexp=^(RebootRequired|AnotherBenignAlert|...)$
```
You can also only block reboots for firing alerts:
```console
--alert-firing-only=true
```
See the section on Prometheus metrics for an important application of this
filter.
### Blocking Reboots via Pods
You can also block reboots of an _individual node_ when specific pods
are scheduled on it:
```console
--blocking-pod-selector=runtime=long,cost=expensive
```
Since label selector strings use commas to express logical 'and', you can
specify this parameter multiple times for 'or':
```console
--blocking-pod-selector=runtime=long,cost=expensive
--blocking-pod-selector=name=temperamental
```
In this case, the presence of either an (appropriately labelled) expensive long
running job or a known temperamental pod on a node will stop it rebooting.
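To try this out, you can label a running pod accordingly (a sketch; the pod name is hypothetical):
```console
kubectl label pod my-long-running-job runtime=long cost=expensive
```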
> Try not to abuse this mechanism - it's better to strive for
> restartability where possible. If you do use it, make sure you set
> up a RebootRequired alert as described in the next section so that
> you can intervene manually if reboots are blocked for too long.
### Prometheus Metrics
Each kured pod exposes a single gauge metric (`:8080/metrics`) that
indicates the presence of the sentinel file:
```console
# HELP kured_reboot_required OS requires reboot due to software updates.
# TYPE kured_reboot_required gauge
kured_reboot_required{node="ip-xxx-xxx-xxx-xxx.ec2.internal"} 0
```
The purpose of this metric is to power an alert which will summon an
operator if the cluster cannot reboot itself automatically for a
prolonged period:
```console
# Alert if a reboot is required for any machines. Acts as a failsafe for the
# reboot daemon, which will not reboot nodes if there are pending alerts save
# this one.
ALERT RebootRequired
IF max(kured_reboot_required) != 0
FOR 24h
LABELS { severity="warning" }
ANNOTATIONS {
summary = "Machine(s) require being rebooted, and the reboot daemon has failed to do so for 24 hours",
impact = "Cluster nodes more vulnerable to security exploits. Eventually, no disk space left.",
description = "Machine(s) require being rebooted, probably due to kernel update.",
}
```
If you choose to employ such an alert and have configured kured to
probe for active alerts before rebooting, be sure to specify
`--alert-filter-regexp=^RebootRequired$` to avoid deadlock!
### Notifications
When you specify a formatted URL using `--notify-url`, kured will notify
about draining and rebooting nodes across a list of technologies.
![Notification](img/slack-notification.png)
Alternatively you can use the `--message-template-drain` and `--message-template-reboot` to customize the text of the message, e.g.
```console
--message-template-drain="Draining node %s part of *my-cluster* in region *xyz*"
```
Here is the syntax:
- slack: `slack://tokenA/tokenB/tokenC`
(`--slack-hook-url` is deprecated but possible to use)
- rocketchat: `rocketchat://[username@]rocketchat-host/token[/channel|@recipient]`
- teams: `teams://tName/token-a/token-b/token-c`
> **Attention**: as the [format of the URL has changed](https://github.com/containrrr/shoutrrr/issues/138), you also have to specify a `tName`
- Email: `smtp://username:password@host:port/?fromAddress=fromAddress&toAddresses=recipient1[,recipient2,...]`
More details here: [containrrr.dev/shoutrrr/v0.4/services/overview](https://containrrr.dev/shoutrrr/v0.4/services/overview)
### Overriding Lock Configuration
The `--ds-name` and `--ds-namespace` arguments should match the name and
namespace of the daemonset used to deploy the reboot daemon - the locking is
implemented by means of an annotation on this resource. The defaults match
the daemonset YAML provided in the repository.
Similarly `--lock-annotation` can be used to change the name of the
annotation kured will use to store the lock, but the default is almost
certainly safe.
## Operation
The example commands in this section assume that you have not
overridden the default lock annotation, daemonset name or namespace;
if you have, you will have to adjust the commands accordingly.
### Testing
You can test your configuration by provoking a reboot on a node:
```console
sudo touch /var/run/reboot-required
```
### Disabling Reboots
If you need to temporarily stop kured from rebooting any nodes, you
can take the lock manually:
```console
kubectl -n kube-system annotate ds kured weave.works/kured-node-lock='{"nodeID":"manual"}'
```
Don't forget to release it afterwards!
### Manual Unlock
In exceptional circumstances, such as a node experiencing a permanent
failure whilst rebooting, manual intervention may be required to
remove the cluster lock:
```console
kubectl -n kube-system annotate ds kured weave.works/kured-node-lock-
```
> NB the `-` at the end of the command is important - it instructs
> `kubectl` to remove that annotation entirely.
### Automatic Unlock
In exceptional circumstances (especially when used with cluster-autoscaler), a node
which holds the lock might be killed, and the annotation will then stay there forever.
Using `--lock-ttl=30m` will allow other nodes to take over once the TTL has expired (in this case 30 minutes) and continue the reboot process.
### Delaying Lock Release
Using `--lock-release-delay=30m` will cause nodes to hold the lock for the specified time frame (in this case 30 minutes) before it is released and the reboot process continues. This can be used to throttle reboots across the cluster.
## Building
Kured now uses [Go
Modules](https://github.com/golang/go/wiki/Modules), so build
instructions vary depending on where you have checked out the
repository:
**Building outside $GOPATH:**
```console
make
```
**Building inside $GOPATH:**
```console
GO111MODULE=on make
```
You can find the current preferred version of Golang in the [go.mod file](go.mod).
If you are interested in contributing code to kured, please take a look at
our [development][development] docs.
[development]: DEVELOPMENT.md
## Frequently Asked/Anticipated Questions
### Why is there no `latest` tag on Docker Hub?
Use of `latest` for production deployments is bad practice - see
[here](https://kubernetes.io/docs/concepts/configuration/overview) for
details. The manifest on `main` refers to `latest` for local
development testing with minikube only; for production use choose a
versioned manifest from the [release page](https://github.com/weaveworks/kured/releases/).
## Getting Help
If you have any questions about, feedback for or problems with `kured`:
- Invite yourself to the <a href="https://slack.cncf.io/" target="_blank">CNCF Slack</a>.
- Ask a question on the [#kured](https://cloud-native.slack.com/archives/kured) slack channel.
- [File an issue](https://github.com/kubereboot/kured/issues/new).
- Join us in [our monthly meeting](https://docs.google.com/document/d/1AWT8YDdqZY-Se6Y1oAlwtujWLVpNVK2M_F_Vfqw06aI/edit),
every first Wednesday of the month at 16:00 UTC.
- You might want to [join the kured-dev mailing list](https://lists.cncf.io/g/cncf-kured-dev) as well.
* Invite yourself to the <a href="https://slack.weave.works/" target="_blank">Weave Users Slack</a>.
* Ask a question on the [#kured](https://weave-community.slack.com/messages/kured/) slack channel.
* [File an issue](https://github.com/weaveworks/kured/issues/new).
* Join us in [our monthly meeting](https://docs.google.com/document/d/1bsHTjHhqaaZ7yJnXF6W8c89UB_yn-OoSZEmDnIP34n8/edit#),
every fourth Wednesday of the month at 16:00 UTC.
We follow the [CNCF Code of Conduct](CODE_OF_CONDUCT.md).
We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
Your feedback is always welcome!
## Trademarks
**Kured is a [Cloud Native Computing Foundation](https://cncf.io/) Sandbox project.**
![Cloud Native Computing Foundation logo](img/cncf-color.png)
The Linux Foundation® (TLF) has registered trademarks and uses trademarks. For a list of TLF trademarks, see [Trademark Usage](https://www.linuxfoundation.org/legal/trademark-usage).
## License
[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fkubereboot%2Fkured.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fkubereboot%2Fkured?ref=badge_large)

21
charts/kured/.helmignore Normal file
View File

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

14
charts/kured/Chart.yaml Normal file
View File

@@ -0,0 +1,14 @@
apiVersion: v1
appVersion: "1.9.1"
description: A Helm chart for kured
name: kured
version: 2.11.2
home: https://github.com/weaveworks/kured
maintainers:
- name: ckotzbauer
email: christian.kotzbauer@gmail.com
- name: davidkarlsen
email: david@davidkarlsen.com
sources:
- https://github.com/weaveworks/kured
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png

126
charts/kured/README.md Normal file
View File

@@ -0,0 +1,126 @@
# Kured (KUbernetes REboot Daemon)
## Introduction
This chart installs the "Kubernetes Reboot Daemon" using the Helm Package Manager.
## Prerequisites
- Kubernetes 1.9+
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
$ helm repo add kured https://weaveworks.github.io/kured
$ helm install my-release kured/kured
```
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```bash
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Migrate from stable Helm-Chart
The following changes have been made compared to the stable chart:
- **[BREAKING CHANGE]** The `autolock` feature was removed. Use `configuration.startTime` and `configuration.endTime` instead.
- Role inconsistencies have been fixed (allowed verbs for modifying the `DaemonSet`, apiGroup of `PodSecurityPolicy`)
- Added support for affinities.
- Configuration of cli-flags can be made through a `configuration` object.
- Added optional `Service` and `ServiceMonitor` support for metrics endpoint.
## Configuration
| Config | Description | Default |
| ------ | ----------- | ------- |
| `image.repository` | Image repository | `weaveworks/kured` |
| `image.tag` | Image tag | `1.9.1` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Image pull secrets | `[]` |
| `updateStrategy` | Daemonset update strategy | `RollingUpdate` |
| `maxUnavailable` | The max pods unavailable during a rolling update | `1` |
| `podAnnotations` | Annotations to apply to pods (eg to add Prometheus annotations) | `{}` |
| `dsAnnotations` | Annotations to apply to the kured DaemonSet | `{}` |
| `extraArgs` | Extra arguments to pass to `/usr/bin/kured`. See below. | `{}` |
| `extraEnvVars` | Array of environment variables to pass to the daemonset. | `{}` |
| `configuration.lockTtl` | cli-parameter `--lock-ttl` | `0` |
| `configuration.lockReleaseDelay` | cli-parameter `--lock-release-delay` | `0` |
| `configuration.alertFilterRegexp` | cli-parameter `--alert-filter-regexp` | `""` |
| `configuration.alertFiringOnly` | cli-parameter `--alert-firing-only` | `false` |
| `configuration.blockingPodSelector` | Array of selectors for multiple cli-parameters `--blocking-pod-selector` | `[]` |
| `configuration.endTime` | cli-parameter `--end-time` | `""` |
| `configuration.lockAnnotation` | cli-parameter `--lock-annotation` | `""` |
| `configuration.period` | cli-parameter `--period` | `""` |
| `configuration.forceReboot` | cli-parameter `--force-reboot` | `false` |
| `configuration.drainGracePeriod` | cli-parameter `--drain-grace-period` | `""` |
| `configuration.drainTimeout` | cli-parameter `--drain-timeout` | `""` |
| `configuration.skipWaitForDeleteTimeout` | cli-parameter `--skip-wait-for-delete-timeout` | `""` |
| `configuration.prometheusUrl` | cli-parameter `--prometheus-url` | `""` |
| `configuration.rebootDays` | Array of days for multiple cli-parameters `--reboot-days` | `[]` |
| `configuration.rebootSentinel` | cli-parameter `--reboot-sentinel` | `""` |
| `configuration.rebootSentinelCommand` | cli-parameter `--reboot-sentinel-command` | `""` |
| `configuration.rebootCommand` | cli-parameter `--reboot-command` | `""` |
| `configuration.rebootDelay` | cli-parameter `--reboot-delay` | `""` |
| `configuration.slackChannel` | cli-parameter `--slack-channel` | `""` |
| `configuration.slackHookUrl` | cli-parameter `--slack-hook-url` | `""` |
| `configuration.slackUsername` | cli-parameter `--slack-username` | `""` |
| `configuration.notifyUrl` | cli-parameter `--notify-url` | `""` |
| `configuration.messageTemplateDrain` | cli-parameter `--message-template-drain` | `""` |
| `configuration.messageTemplateReboot` | cli-parameter `--message-template-reboot` | `""` |
| `configuration.startTime` | cli-parameter `--start-time` | `""` |
| `configuration.timeZone` | cli-parameter `--time-zone` | `""` |
| `configuration.annotateNodes` | cli-parameter `--annotate-nodes` | `false` |
| `configuration.logFormat` | cli-parameter `--log-format` | `"text"` |
| `configuration.preferNoScheduleTaint` | Taint name applied during pending node reboot | `""` |
| `rbac.create` | Create RBAC roles | `true` |
| `serviceAccount.create` | Create a service account | `true` |
| `serviceAccount.name` | Service account name to create (or use if `serviceAccount.create` is false) | (chart fullname) |
| `podSecurityPolicy.create` | Create podSecurityPolicy | `false` |
| `resources` | Resources requests and limits. | `{}` |
| `metrics.create` | Create a ServiceMonitor for prometheus-operator | `false` |
| `metrics.namespace` | The namespace to create the ServiceMonitor in | `""` |
| `metrics.labels` | Additional labels for the ServiceMonitor | `{}` |
| `metrics.interval` | Interval prometheus should scrape the endpoint | `60s` |
| `metrics.scrapeTimeout` | A custom scrapeTimeout for prometheus | `""` |
| `service.create` | Create a Service for the metrics endpoint | `false` |
| `service.name` | Service name for the metrics endpoint | `""` |
| `service.port` | Port of the service to expose | `8080` |
| `service.annotations` | Annotations to apply to the service (eg to add Prometheus annotations) | `{}` |
| `podLabels` | Additional labels for pods (e.g. CostCenter=IT) | `{}` |
| `priorityClassName` | Priority Class to be used by the pods | `""` |
| `tolerations` | Tolerations to apply to the daemonset (eg to allow running on master) | `[{"key": "node-role.kubernetes.io/master", "effect": "NoSchedule"}]`|
| `affinity` | Affinity for the daemonset (ie, restrict which nodes kured runs on) | `{}` |
| `nodeSelector` | Node Selector for the daemonset (ie, restrict which nodes kured runs on) | `{}` |
| `volumeMounts` | Maps of volumes mount to mount | `{}` |
| `volumes` | Maps of volumes to mount | `{}` |
See https://github.com/weaveworks/kured#configuration for `extraArgs` values (not contained in the `configuration` object). Note that
```yaml
extraArgs:
foo: 1
bar-baz: 2
```
becomes `/usr/bin/kured ... --foo=1 --bar-baz=2`.
## Prometheus Metrics
Kured exposes a single Prometheus metric indicating whether a reboot is required or not (see the [kured docs](https://github.com/weaveworks/kured#prometheus-metrics) for details).
#### Prometheus-Operator
```yaml
metrics:
create: true
```
#### Prometheus Annotations
```yaml
service:
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
prometheus.io/port: "8080"
```

View File

@@ -0,0 +1,13 @@
# This is tested twice:
# Basic install test with chart-testing (on charts PRs)
# Functional testing in PRs (other PRs)
service:
create: true
name: kured-prometheus-endpoint
port: 8080
type: NodePort
nodePort: 30000
# Do not override configuration.period in this file, so that
# we can test the Prometheus-exposed metrics without rebooting.

View File

@@ -0,0 +1,3 @@
Kured will check for /var/run/reboot-required, and reboot nodes when needed.
See https://github.com/weaveworks/kured/ for details.

View File

@@ -0,0 +1,72 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kured.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kured.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kured.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "kured.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "kured.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for podsecuritypolicy.
*/}}
{{- define "kured.psp.apiVersion" -}}
{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "policy/v1beta1" -}}
{{- end -}}
{{- end -}}
{{/*
Returns a set of labels applied to each resource.
*/}}
{{- define "kured.labels" -}}
app: {{ template "kured.name" . }}
chart: {{ template "kured.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- end -}}
{{/*
Returns a set of matchLabels applied.
*/}}
{{- define "kured.matchLabels" -}}
app: {{ template "kured.name" . }}
release: {{ .Release.Name }}
{{- end -}}

View File

@@ -0,0 +1,30 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "kured.fullname" . }}
labels:
{{- include "kured.labels" . | nindent 4 }}
rules:
# Allow kured to read spec.unschedulable
# Allow kubectl to drain/uncordon
#
# NB: These permissions are tightly coupled to the bundled version of kubectl; the ones below
# match https://github.com/kubernetes/kubernetes/blob/v1.19.4/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
#
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "patch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["list","delete","get"]
- apiGroups: ["extensions"]
resources: ["daemonsets"]
verbs: ["get"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["get"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
{{- end -}}

View File

@@ -0,0 +1,16 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "kured.fullname" . }}
labels:
{{- include "kured.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "kured.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "kured.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@@ -0,0 +1,187 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ template "kured.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kured.labels" . | nindent 4 }}
{{- if .Values.dsAnnotations }}
annotations:
{{- range $key, $value := .Values.dsAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
updateStrategy:
type: {{ .Values.updateStrategy }}
{{- if eq .Values.updateStrategy "RollingUpdate"}}
rollingUpdate:
maxUnavailable: {{ .Values.maxUnavailable }}
{{- end}}
selector:
matchLabels:
{{- include "kured.matchLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "kured.labels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- toYaml .Values.podLabels | nindent 8 }}
{{- end }}
{{- if .Values.podAnnotations }}
annotations:
{{- range $key, $value := .Values.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
serviceAccountName: {{ template "kured.serviceAccountName" . }}
hostPID: true
restartPolicy: Always
{{- with .Values.image.pullSecrets }}
imagePullSecrets:
{{ toYaml . | indent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
privileged: true # Give permission to nsenter /proc/1/ns/mnt
resources:
{{ toYaml .Values.resources | indent 12 }}
command:
- /usr/bin/kured
args:
- --ds-name={{ template "kured.fullname" . }}
- --ds-namespace={{ .Release.Namespace }}
{{- if .Values.configuration.lockTtl }}
- --lock-ttl={{ .Values.configuration.lockTtl }}
{{- end }}
{{- if .Values.configuration.lockReleaseDelay }}
- --lock-release-delay={{ .Values.configuration.lockReleaseDelay }}
{{- end }}
{{- if .Values.configuration.alertFilterRegexp }}
- --alert-filter-regexp={{ .Values.configuration.alertFilterRegexp }}
{{- end }}
{{- if .Values.configuration.alertFiringOnly }}
- --alert-firing-only={{ .Values.configuration.alertFiringOnly }}
{{- end }}
{{- range .Values.configuration.blockingPodSelector }}
- --blocking-pod-selector={{ . }}
{{- end }}
{{- if .Values.configuration.endTime }}
- --end-time={{ .Values.configuration.endTime }}
{{- end }}
{{- if .Values.configuration.lockAnnotation }}
- --lock-annotation={{ .Values.configuration.lockAnnotation }}
{{- end }}
{{- if .Values.configuration.period }}
- --period={{ .Values.configuration.period }}
{{- end }}
{{- if .Values.configuration.forceReboot }}
- --force-reboot
{{- end }}
{{- if .Values.configuration.drainGracePeriod }}
- --drain-grace-period={{ .Values.configuration.drainGracePeriod }}
{{- end }}
{{- if .Values.configuration.drainTimeout }}
- --drain-timeout={{ .Values.configuration.drainTimeout }}
{{- end }}
{{- if .Values.configuration.skipWaitForDeleteTimeout }}
- --skip-wait-for-delete-timeout={{ .Values.configuration.skipWaitForDeleteTimeout }}
{{- end }}
{{- if .Values.configuration.prometheusUrl }}
- --prometheus-url={{ .Values.configuration.prometheusUrl }}
{{- end }}
{{- range .Values.configuration.rebootDays }}
- --reboot-days={{ . }}
{{- end }}
{{- if .Values.configuration.rebootSentinel }}
- --reboot-sentinel={{ .Values.configuration.rebootSentinel }}
{{- end }}
{{- if .Values.configuration.rebootSentinelCommand }}
- --reboot-sentinel-command={{ .Values.configuration.rebootSentinelCommand }}
{{- end }}
{{- if .Values.configuration.rebootCommand }}
- --reboot-command={{ .Values.configuration.rebootCommand }}
{{- end }}
{{- if .Values.configuration.rebootDelay }}
- --reboot-delay={{ .Values.configuration.rebootDelay }}
{{- end }}
{{- if .Values.configuration.slackChannel }}
- --slack-channel={{ .Values.configuration.slackChannel }}
{{- end }}
{{- if .Values.configuration.slackHookUrl }}
- --slack-hook-url={{ .Values.configuration.slackHookUrl }}
{{- end }}
{{- if .Values.configuration.slackUsername }}
- --slack-username={{ .Values.configuration.slackUsername }}
{{- end }}
{{- if .Values.configuration.notifyUrl }}
- --notify-url={{ .Values.configuration.notifyUrl }}
{{- end }}
{{- if .Values.configuration.messageTemplateDrain }}
- --message-template-drain={{ .Values.configuration.messageTemplateDrain }}
{{- end }}
{{- if .Values.configuration.messageTemplateReboot }}
- --message-template-reboot={{ .Values.configuration.messageTemplateReboot }}
{{- end }}
{{- if .Values.configuration.startTime }}
- --start-time={{ .Values.configuration.startTime }}
{{- end }}
{{- if .Values.configuration.timeZone }}
- --time-zone={{ .Values.configuration.timeZone }}
{{- end }}
{{- if .Values.configuration.annotateNodes }}
- --annotate-nodes={{ .Values.configuration.annotateNodes }}
{{- end }}
{{- if .Values.configuration.preferNoScheduleTaint }}
- --prefer-no-schedule-taint={{ .Values.configuration.preferNoScheduleTaint }}
{{- end }}
{{- if .Values.configuration.logFormat }}
- --log-format={{ .Values.configuration.logFormat }}
{{- end }}
{{- range $key, $value := .Values.extraArgs }}
{{- if $value }}
- --{{ $key }}={{ $value }}
{{- else }}
- --{{ $key }}
{{- end }}
{{- end }}
{{- if .Values.volumeMounts }}
volumeMounts:
{{- toYaml .Values.volumeMounts | nindent 12 }}
{{- end }}
ports:
- containerPort: 8080
name: metrics
env:
# Pass in the name of the node on which this pod is scheduled
# for use with drain/uncordon operations and lock acquisition
- name: KURED_NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{{- if .Values.extraEnvVars }}
{{ toYaml .Values.extraEnvVars | nindent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- if .Values.volumes }}
volumes:
{{- toYaml .Values.volumes | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,21 @@
{{- if .Values.podSecurityPolicy.create}}
apiVersion: {{ template "kured.psp.apiVersion" . }}
kind: PodSecurityPolicy
metadata:
name: {{ template "kured.fullname" . }}
labels:
{{- include "kured.labels" . | nindent 4 }}
spec:
privileged: true
hostPID: true
allowedCapabilities: ['*']
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes: ['*']
{{- end }}


@@ -0,0 +1,30 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: {{ .Release.Namespace }}
name: {{ template "kured.fullname" . }}
labels:
{{- include "kured.labels" . | nindent 4 }}
rules:
# Allow kured to lock/unlock itself
- apiGroups: ["extensions"]
resources: ["daemonsets"]
resourceNames: ["{{ template "kured.fullname" . }}"]
verbs: ["update", "patch"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
resourceNames: ["{{ template "kured.fullname" . }}"]
verbs: ["update", "patch"]
{{- if .Values.podSecurityPolicy.create }}
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
resourceNames: ["{{ template "kured.fullname" . }}"]
verbs: ["use"]
- apiGroups: ["policy"]
resources: ["podsecuritypolicies"]
resourceNames: ["{{ template "kured.fullname" . }}"]
verbs: ["use"]
{{- end }}
{{- end -}}


@@ -0,0 +1,17 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
namespace: {{ .Release.Namespace }}
name: {{ template "kured.fullname" . }}
labels:
{{- include "kured.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
namespace: {{ .Release.Namespace }}
name: {{ template "kured.serviceAccountName" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "kured.fullname" . }}
{{- end -}}


@@ -0,0 +1,29 @@
{{- if or .Values.service.create .Values.metrics.create }}
apiVersion: v1
kind: Service
metadata:
{{- if .Values.service.name }}
name: {{ .Values.service.name }}
{{- else }}
name: {{ template "kured.fullname" . }}
{{- end }}
labels:
{{- include "kured.labels" . | nindent 4 }}
{{- if .Values.service.annotations }}
annotations:
{{- range $key, $value := .Values.service.annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- name: metrics
port: {{ .Values.service.port }}
targetPort: 8080
{{- if eq .Values.service.type "NodePort" }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
{{- include "kured.matchLabels" . | nindent 4 }}
{{- end }}


@@ -0,0 +1,9 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "kured.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kured.labels" . | nindent 4 }}
{{- end -}}


@@ -0,0 +1,31 @@
{{- if .Values.metrics.create }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kured.fullname" . }}
{{- if .Values.metrics.namespace }}
namespace: {{ .Values.metrics.namespace }}
{{- end }}
labels:
{{- include "kured.labels" . | nindent 4 }}
{{- if .Values.metrics.labels }}
{{- toYaml .Values.metrics.labels | nindent 4 }}
{{- end }}
spec:
endpoints:
- interval: {{ .Values.metrics.interval }}
{{- if .Values.metrics.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.scrapeTimeout }}
{{- end }}
honorLabels: true
targetPort: 8080
path: /metrics
scheme: http
jobLabel: "{{ .Release.Name }}"
selector:
matchLabels:
{{- include "kured.matchLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}


@@ -0,0 +1,31 @@
image:
repository: weaveworks/kured
tag: latest
configuration:
# annotationTtl: 0 # force clean annotation after this amount of time (default 0, disabled)
# alertFilterRegexp: "" # alert names to ignore when checking for active alerts
# alertFiringOnly: false # only consider firing alerts when checking for active alerts
# blockingPodSelector: [] # label selector identifying pods whose presence should prevent reboots
# endTime: "" # only reboot before this time of day (default "23:59")
# lockAnnotation: "" # annotation in which to record locking node (default "weave.works/kured-node-lock")
period: "1m" # reboot check period (default 1h0m0s)
# forceReboot: false # force a reboot even if the drain fails or times out (default: false)
# drainGracePeriod: "" # time in seconds given to each pod to terminate gracefully, if negative, the default value specified in the pod will be used (default: -1)
# drainTimeout: "" # timeout after which the drain is aborted (default: 0, infinite time)
# skipWaitForDeleteTimeout: "" # when time is greater than zero, skip waiting for the pods whose deletion timestamp is older than N seconds while draining a node (default: 0)
# prometheusUrl: "" # Prometheus instance to probe for active alerts
# rebootDays: [] # only reboot on these days (default [su,mo,tu,we,th,fr,sa])
# rebootSentinel: "" # path to file whose existence signals need to reboot (default "/var/run/reboot-required")
# rebootSentinelCommand: "" # command for which a successful run signals need to reboot (default ""). If non-empty, sentinel file will be ignored.
# slackChannel: "" # slack channel for reboot notifications
# slackHookUrl: "" # slack hook URL for reboot notifications
# slackUsername: "" # slack username for reboot notifications (default "kured")
# notifyUrl: "" # notification URL with the syntax as follows: https://containrrr.dev/shoutrrr/services/overview/
# messageTemplateDrain: "" # slack message template when notifying about a node being drained (default "Draining node %s")
# messageTemplateReboot: "" # slack message template when notifying about a node being rebooted (default "Rebooted node %s")
# startTime: "" # only reboot after this time of day (default "0:00")
# timeZone: "" # time-zone to use (valid zones from "time" golang package)
# annotateNodes: false # enable 'weave.works/kured-reboot-in-progress' and 'weave.works/kured-most-recent-reboot-needed' node annotations to signify kured reboot operations
# lockReleaseDelay: "5m" # hold lock after reboot by this amount of time (default 0, disabled)
# logFormat: "text" # log format specified as text or json, defaults to text

charts/kured/values.yaml Normal file (96 lines)

@@ -0,0 +1,96 @@
image:
repository: weaveworks/kured
tag: "" # will default to the appVersion in Chart.yaml
pullPolicy: IfNotPresent
pullSecrets: []
updateStrategy: RollingUpdate
# requires RollingUpdate updateStrategy
maxUnavailable: 1
podAnnotations: {}
dsAnnotations: {}
extraArgs: {}
extraEnvVars:
# - name: slackHookUrl
# valueFrom:
# secretKeyRef:
# name: secret_name
# key: secret_key
# - name: regularEnvVariable
# value: 123
configuration:
lockTtl: 0 # force clean annotation after this amount of time (default 0, disabled)
alertFilterRegexp: "" # alert names to ignore when checking for active alerts
alertFiringOnly: false # only consider firing alerts when checking for active alerts
blockingPodSelector: [] # label selector identifying pods whose presence should prevent reboots
endTime: "" # only reboot before this time of day (default "23:59")
lockAnnotation: "" # annotation in which to record locking node (default "weave.works/kured-node-lock")
period: "" # reboot check period (default 1h0m0s)
forceReboot: false # force a reboot even if the drain fails or times out (default: false)
drainGracePeriod: "" # time in seconds given to each pod to terminate gracefully, if negative, the default value specified in the pod will be used (default: -1)
drainTimeout: "" # timeout after which the drain is aborted (default: 0, infinite time)
skipWaitForDeleteTimeout: "" # when time is greater than zero, skip waiting for the pods whose deletion timestamp is older than N seconds while draining a node (default: 0)
prometheusUrl: "" # Prometheus instance to probe for active alerts
rebootDays: [] # only reboot on these days (default [su,mo,tu,we,th,fr,sa])
rebootSentinel: "" # path to file whose existence signals need to reboot (default "/var/run/reboot-required")
rebootSentinelCommand: "" # command for which a successful run signals need to reboot (default ""). If non-empty, sentinel file will be ignored.
rebootCommand: "/bin/systemctl reboot" # command to run when a reboot is required by the sentinel
rebootDelay: "" # add a delay after drain finishes but before the reboot command is issued
slackChannel: "" # slack channel for reboot notifications
slackHookUrl: "" # slack hook URL for reboot notifications
slackUsername: "" # slack username for reboot notifications (default "kured")
notifyUrl: "" # notification URL with the syntax as follows: https://containrrr.dev/shoutrrr/services/overview/
messageTemplateDrain: "" # slack message template when notifying about a node being drained (default "Draining node %s")
messageTemplateReboot: "" # slack message template when notifying about a node being rebooted (default "Rebooted node %s")
startTime: "" # only reboot after this time of day (default "0:00")
timeZone: "" # time-zone to use (valid zones from "time" golang package)
annotateNodes: false # enable 'weave.works/kured-reboot-in-progress' and 'weave.works/kured-most-recent-reboot-needed' node annotations to signify kured reboot operations
lockReleaseDelay: 0 # hold lock after reboot by this amount of time (default 0, disabled)
preferNoScheduleTaint: "" # Taint name applied during pending node reboot (to prevent receiving additional pods from other rebooting nodes). Disabled by default. Set e.g. to "weave.works/kured-node-reboot" to enable tainting.
logFormat: "text" # log format specified as text or json, defaults to text
rbac:
create: true
serviceAccount:
create: true
name:
podSecurityPolicy:
create: false
resources: {}
metrics:
create: false
namespace: ""
labels: {}
interval: 60s
scrapeTimeout: ""
service:
create: false
port: 8080
annotations: {}
name: ""
type: ClusterIP
podLabels: {}
priorityClassName: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
affinity: {}
nodeSelector: {}
volumeMounts: []
volumes: []

cmd/kured/Dockerfile Normal file (4 lines)

@@ -0,0 +1,4 @@
FROM alpine:3.15.0
RUN apk update --no-cache && apk upgrade --no-cache && apk add --no-cache ca-certificates tzdata
COPY ./kured /usr/bin/kured
ENTRYPOINT ["/usr/bin/kured"]


@@ -0,0 +1,19 @@
FROM --platform=$BUILDPLATFORM golang:bullseye AS build
ARG TARGETOS
ARG TARGETARCH
ARG TARGETVARIANT
ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH
ENV GOVARIANT=$TARGETVARIANT
WORKDIR /src
COPY . .
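# Enumerate all non-vendored dependency source files of cmd/kured; presumably a
# sanity/cache-warming step so that dependency resolution fails here, before the build below.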
RUN go list -f '{{join .Deps "\n"}}' ./cmd/kured | grep -v /vendor/ | xargs go list -f '{{if not .Standard}}{{ $dep := . }}{{range .GoFiles}}{{$dep.Dir}}/{{.}} {{end}}{{end}}'
RUN CGO_ENABLED=0 go build -o cmd/kured/kured cmd/kured/*.go
FROM --platform=$TARGETPLATFORM alpine:3.15 AS bin
RUN apk update --no-cache && apk upgrade --no-cache && apk add --no-cache ca-certificates tzdata
COPY --from=build /src/cmd/kured/kured /usr/bin/kured
ENTRYPOINT ["/usr/bin/kured"]

File diff suppressed because it is too large.


@@ -3,74 +3,233 @@ package main
import (
"reflect"
"testing"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/weaveworks/kured/pkg/alerts"
assert "gotest.tools/v3/assert"
papi "github.com/prometheus/client_golang/api"
)
func TestValidateNotificationURL(t *testing.T) {
type BlockingChecker struct {
blocking bool
}
func (fbc BlockingChecker) isBlocked() bool {
return fbc.blocking
}
var _ RebootBlocker = BlockingChecker{} // Verify that Type implements Interface.
var _ RebootBlocker = (*BlockingChecker)(nil) // Verify that *Type implements Interface.
func Test_flagCheck(t *testing.T) {
var cmd *cobra.Command
var args []string
slackHookURL = "https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
flagCheck(cmd, args)
if notifyURL != "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET" {
t.Errorf("Slack URL Parsing is wrong: expecting %s but got %s\n", "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET", notifyURL)
}
}
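// The test above exercises flagCheck's translation of a legacy Slack webhook
// URL into a shoutrrr slack:// URL. A minimal sketch of that translation
// (hypothetical helper, not the actual implementation; assumes only the
// standard library "strings" package):
//
//	func slackHookToNotifyURL(hookURL string) string {
//		const prefix = "https://hooks.slack.com/services/"
//		if strings.HasPrefix(hookURL, prefix) {
//			return "slack://" + strings.TrimPrefix(hookURL, prefix)
//		}
//		return hookURL
//	}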
func Test_rebootBlocked(t *testing.T) {
noCheckers := []RebootBlocker{}
nonblockingChecker := BlockingChecker{blocking: false}
blockingChecker := BlockingChecker{blocking: true}
// Instantiate a prometheusClient with a broken_url
promClient, err := alerts.NewPromClient(papi.Config{Address: "broken_url"})
if err != nil {
log.Fatal("Can't create prometheusClient: ", err)
}
brokenPrometheusClient := PrometheusBlockingChecker{promClient: promClient, filter: nil, firingOnly: false}
type args struct {
blockers []RebootBlocker
}
tests := []struct {
name string
slackHookURL string
notifyURL string
expected string
name string
args args
want bool
}{
{"slackHookURL only works fine", "https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET", "", "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"},
{"slackHookURL and notify URL together only keeps notifyURL", "\"https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET\"", "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com", "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"},
{"slackHookURL removes extraneous double quotes", "\"https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET\"", "", "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"},
{"slackHookURL removes extraneous single quotes", "'https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET'", "", "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"},
{"notifyURL removes extraneous double quotes", "", "\"teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com\"", "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"},
{"notifyURL removes extraneous single quotes", "", "'teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com'", "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"},
{
name: "Do not block on no blocker defined",
args: args{blockers: noCheckers},
want: false,
},
{
name: "Ensure a blocker blocks",
args: args{blockers: []RebootBlocker{blockingChecker}},
want: true,
},
{
name: "Ensure a non-blocker doesn't block",
args: args{blockers: []RebootBlocker{nonblockingChecker}},
want: false,
},
{
name: "Ensure one blocker is enough to block",
args: args{blockers: []RebootBlocker{nonblockingChecker, blockingChecker}},
want: true,
},
{
name: "Do block on error contacting prometheus API",
args: args{blockers: []RebootBlocker{brokenPrometheusClient}},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := validateNotificationURL(tt.notifyURL, tt.slackHookURL); !reflect.DeepEqual(got, tt.expected) {
t.Errorf("validateNotificationURL() = %v, expected %v", got, tt.expected)
if got := rebootBlocked(tt.args.blockers...); got != tt.want {
t.Errorf("rebootBlocked() = %v, want %v", got, tt.want)
}
})
}
}
func Test_stripQuotes(t *testing.T) {
func Test_buildHostCommand(t *testing.T) {
type args struct {
pid int
command []string
}
tests := []struct {
name string
input string
expected string
name string
args args
want []string
}{
{
name: "string with no surrounding quotes is unchanged",
input: "Hello, world!",
expected: "Hello, world!",
},
{
name: "string with surrounding double quotes should strip quotes",
input: "\"Hello, world!\"",
expected: "Hello, world!",
},
{
name: "string with surrounding single quotes should strip quotes",
input: "'Hello, world!'",
expected: "Hello, world!",
},
{
name: "string with unbalanced surrounding quotes is unchanged",
input: "'Hello, world!\"",
expected: "'Hello, world!\"",
},
{
name: "string with length of one is unchanged",
input: "'",
expected: "'",
},
{
name: "string with length of zero is unchanged",
input: "",
expected: "",
name: "Ensure command will run with nsenter",
args: args{pid: 1, command: []string{"ls", "-Fal"}},
want: []string{"/usr/bin/nsenter", "-m/proc/1/ns/mnt", "--", "ls", "-Fal"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := stripQuotes(tt.input); !reflect.DeepEqual(got, tt.expected) {
t.Errorf("stripQuotes() = %v, expected %v", got, tt.expected)
if got := buildHostCommand(tt.args.pid, tt.args.command); !reflect.DeepEqual(got, tt.want) {
t.Errorf("buildHostCommand() = %v, want %v", got, tt.want)
}
})
}
}
func Test_buildSentinelCommand(t *testing.T) {
type args struct {
rebootSentinelFile string
rebootSentinelCommand string
}
tests := []struct {
name string
args args
want []string
}{
{
name: "Ensure a sentinelFile generates a shell 'test' command with the right file",
args: args{
rebootSentinelFile: "/test1",
rebootSentinelCommand: "",
},
want: []string{"test", "-f", "/test1"},
},
{
name: "Ensure a sentinelCommand has priority over a sentinelFile if both are provided (because sentinelFile is always provided)",
args: args{
rebootSentinelFile: "/test1",
rebootSentinelCommand: "/sbin/reboot-required -r",
},
want: []string{"/sbin/reboot-required", "-r"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := buildSentinelCommand(tt.args.rebootSentinelFile, tt.args.rebootSentinelCommand); !reflect.DeepEqual(got, tt.want) {
t.Errorf("buildSentinelCommand() = %v, want %v", got, tt.want)
}
})
}
}
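// A minimal sketch (not the actual implementation) of the behaviour the table
// above pins down for buildSentinelCommand: a non-empty sentinel command takes
// priority and is word-split; otherwise the sentinel file becomes a shell
// 'test -f' invocation. Assumes github.com/google/shlex for the splitting:
//
//	func buildSentinelCommandSketch(sentinelFile, sentinelCommand string) []string {
//		if sentinelCommand != "" {
//			cmd, err := shlex.Split(sentinelCommand)
//			if err != nil {
//				log.Fatalf("Error parsing provided sentinel command: %v", err)
//			}
//			return cmd
//		}
//		return []string{"test", "-f", sentinelFile}
//	}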
func Test_parseRebootCommand(t *testing.T) {
type args struct {
rebootCommand string
}
tests := []struct {
name string
args args
want []string
}{
{
name: "Ensure a reboot command is properly parsed",
args: args{
rebootCommand: "/sbin/systemctl reboot",
},
want: []string{"/sbin/systemctl", "reboot"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := parseRebootCommand(tt.args.rebootCommand); !reflect.DeepEqual(got, tt.want) {
t.Errorf("parseRebootCommand() = %v, want %v", got, tt.want)
}
})
}
}
func Test_rebootRequired(t *testing.T) {
type args struct {
sentinelCommand []string
}
tests := []struct {
name string
args args
want bool
}{
{
name: "Ensure rc = 0 means reboot required",
args: args{
sentinelCommand: []string{"true"},
},
want: true,
},
{
name: "Ensure rc != 0 means reboot NOT required",
args: args{
sentinelCommand: []string{"false"},
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := rebootRequired(tt.args.sentinelCommand); got != tt.want {
t.Errorf("rebootRequired() = %v, want %v", got, tt.want)
}
})
}
}
func Test_rebootRequired_fatals(t *testing.T) {
cases := []struct {
param []string
expectFatal bool
}{
{
param: []string{"true"},
expectFatal: false,
},
{
param: []string{"./babar"},
expectFatal: true,
},
}
defer func() { log.StandardLogger().ExitFunc = nil }()
var fatal bool
log.StandardLogger().ExitFunc = func(int) { fatal = true }
for _, c := range cases {
fatal = false
rebootRequired(c.param)
assert.Equal(t, c.expectFatal, fatal)
}
}


@@ -5,14 +5,14 @@ import (
)
type regexpValue struct {
*regexp.Regexp
value **regexp.Regexp
}
func (rev *regexpValue) String() string {
if rev.Regexp == nil {
if *rev.value == nil {
return ""
}
return rev.Regexp.String()
return (*rev.value).String()
}
func (rev *regexpValue) Set(s string) error {
@@ -20,11 +20,12 @@ func (rev *regexpValue) Set(s string) error {
if err != nil {
return err
}
rev.Regexp = value
*rev.value = value
return nil
}
// Type method returns the type of the flag as a string
func (rev *regexpValue) Type() string {
return "regexp"
return "regexp.Regexp"
}
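// For illustration only: String, Set, and Type above satisfy the pflag.Value
// interface, so a regexpValue can back a regexp-typed command-line flag.
// The variable and flag names below are hypothetical:
//
//	var alertFilter *regexp.Regexp
//	flagSet.Var(&regexpValue{value: &alertFilter}, "alert-filter-regexp",
//		"alert names to ignore when checking for active alerts")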

go.mod (100-line diff)

@@ -1,90 +1,20 @@
module github.com/kubereboot/kured
module github.com/weaveworks/kured
go 1.24.11
go 1.16
require (
github.com/containrrr/shoutrrr v0.8.0
github.com/containrrr/shoutrrr v0.5.2
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/common v0.67.5
github.com/sirupsen/logrus v1.9.3
github.com/spf13/pflag v1.0.10
github.com/stretchr/testify v1.11.1
k8s.io/api v0.34.3
k8s.io/apimachinery v0.34.3
k8s.io/client-go v0.34.3
k8s.io/kubectl v0.34.3
)
require (
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/cobra v1.9.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/time v0.9.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/cli-runtime v0.34.3 // indirect
k8s.io/component-base v0.34.3 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/kustomize/api v0.20.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/common v0.32.1
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.3.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.10.1
github.com/stretchr/testify v1.7.0
gotest.tools/v3 v3.0.3
k8s.io/api v0.22.4
k8s.io/apimachinery v0.22.4
k8s.io/client-go v0.22.4
k8s.io/kubectl v0.22.4
)

go.sum (1302-line diff)

File diff suppressed because it is too large.

Binary file not shown (Before: 24 KiB)

BIN img/logo.png Normal file (binary, After: 16 KiB)

BIN img/slack-notification.png Normal file (binary, After: 7.2 KiB)


@@ -1,41 +0,0 @@
// Package internal provides convenience tools which shouldn't live in cmd/main.
// It provides the internal validation and chaining logic to select the
// appropriate reboot and sentinel check methods based on configuration:
// it validates user input and instantiates the correct checker and rebooter implementations
// for use elsewhere in kured.
package internal
import (
"fmt"
"github.com/kubereboot/kured/pkg/checkers"
"github.com/kubereboot/kured/pkg/reboot"
log "github.com/sirupsen/logrus"
)
// NewRebooter validates the rebootMethod, rebootCommand, and rebootSignal input,
// then chains to the right constructor.
func NewRebooter(rebootMethod string, rebootCommand string, rebootSignal int) (reboot.Rebooter, error) {
switch rebootMethod {
case "command":
log.Infof("Reboot command: %s", rebootCommand)
return reboot.NewCommandRebooter(rebootCommand)
case "signal":
log.Infof("Reboot signal: %d", rebootSignal)
return reboot.NewSignalRebooter(rebootSignal)
default:
return nil, fmt.Errorf("invalid reboot-method configured %s, expected signal or command", rebootMethod)
}
}
// NewRebootChecker validates the rebootSentinelCommand, rebootSentinelFile input,
// then chains to the right constructor.
func NewRebootChecker(rebootSentinelCommand string, rebootSentinelFile string) (checkers.Checker, error) {
// An override of rebootSentinelCommand means a privileged command
if rebootSentinelCommand != "" {
log.Infof("Sentinel checker is (privileged) user provided command: %s", rebootSentinelCommand)
return checkers.NewCommandChecker(rebootSentinelCommand, 1, true)
}
log.Infof("Sentinel checker is (unprivileged) testing for the presence of: %s", rebootSentinelFile)
return checkers.NewFileRebootChecker(rebootSentinelFile)
}
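// Hypothetical wiring from a main package, shown only to illustrate how the
// two constructors above chain together; the flag values here are made up:
//
//	rebooter, err := internal.NewRebooter("command", "/bin/systemctl reboot", 0)
//	if err != nil {
//		log.Fatalf("unrecoverable error - %v", err)
//	}
//	checker, err := internal.NewRebootChecker("", "/var/run/reboot-required")
//	if err != nil {
//		log.Fatalf("unrecoverable error - %v", err)
//	}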


@@ -1,100 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kured
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kured # Must match `--ds-name`
namespace: kube-system # Must match `--ds-namespace`
spec:
selector:
matchLabels:
name: kured
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
name: kured
spec:
serviceAccountName: kured
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
hostPID: true # Facilitate entering the host mount namespace via init
restartPolicy: Always
volumes:
- name: sentinel
hostPath:
path: /var/run
type: Directory
containers:
- name: kured
# If you find yourself here wondering why there is no
# :latest tag on Docker Hub, see the FAQ in the README
image: ghcr.io/kubereboot/kured:1.21.0
imagePullPolicy: IfNotPresent
securityContext:
privileged: false # Not required: the signal reboot method only needs the CAP_KILL capability (added below)
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["*"]
add: ["CAP_KILL"]
ports:
- containerPort: 8080
name: metrics
env:
# Pass in the name of the node on which this pod is scheduled
# for use with drain/uncordon operations and lock acquisition
- name: KURED_NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /sentinel
name: sentinel
readOnly: true
command:
- /usr/bin/kured
- --reboot-sentinel=/sentinel/reboot-required
- --reboot-method=signal
# - --reboot-signal=39
# - --force-reboot=false
# - --drain-grace-period=-1
# - --skip-wait-for-delete-timeout=0
# - --drain-timeout=0
# - --period=1h
# - --ds-namespace=kube-system
# - --ds-name=kured
# - --lock-annotation=weave.works/kured-node-lock
# - --lock-ttl=0
# - --prometheus-url=http://prometheus.monitoring.svc.cluster.local
# - --alert-filter-regexp=^RebootRequired$
# - --alert-firing-only=false
# - --prefer-no-schedule-taint=""
# - --reboot-sentinel-command=""
# - --slack-hook-url=https://hooks.slack.com/...
# - --slack-username=prod
# - --slack-channel=alerting
# - --notify-url="" # See also shoutrrr url format
# - --message-template-drain=Draining node %s
# - --message-template-reboot=Rebooting node %s
# - --message-template-uncordon=Node %s rebooted & uncordoned successfully!
# - --blocking-pod-selector=runtime=long,cost=expensive
# - --blocking-pod-selector=name=temperamental
# - --blocking-pod-selector=...
# - --reboot-days=sun,mon,tue,wed,thu,fri,sat
# - --reboot-delay=90s
# - --start-time=0:00
# - --end-time=23:59:59
# - --time-zone=UTC
# - --annotate-nodes=false
# - --lock-release-delay=30m
# - --log-format=text


@@ -8,14 +8,14 @@ metadata:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kured # Must match `--ds-name`
name: kured # Must match `--ds-name`
namespace: kube-system # Must match `--ds-namespace`
spec:
selector:
matchLabels:
name: kured
updateStrategy:
type: RollingUpdate
type: RollingUpdate
template:
metadata:
labels:
@@ -23,29 +23,18 @@ spec:
spec:
serviceAccountName: kured
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
hostPID: true # Facilitate entering the host mount namespace via init
restartPolicy: Always
volumes:
- name: sentinel
hostPath:
path: /var/run
type: Directory
containers:
- name: kured
# If you find yourself here wondering why there is no
# :latest tag on Docker Hub,see the FAQ in the README
image: ghcr.io/kubereboot/kured:1.21.0
image: docker.io/weaveworks/kured:1.9.1
# If you find yourself here wondering why there is no
# :latest tag on Docker Hub, see the FAQ in the README
imagePullPolicy: IfNotPresent
securityContext:
privileged: true # Give permission to nsenter /proc/1/ns/mnt
readOnlyRootFilesystem: true
ports:
- containerPort: 8080
name: metrics
env:
# Pass in the name of the node on which this pod is scheduled
# for use with drain/uncordon operations and lock acquisition
@@ -53,19 +42,12 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /sentinel
name: sentinel
readOnly: true
command:
- /usr/bin/kured
- --reboot-sentinel=/sentinel/reboot-required
# - --force-reboot=false
# - --drain-grace-period=-1
# - --skip-wait-for-delete-timeout=0
# - --drain-delay=0
# - --drain-timeout=0
# - --drain-pod-selector=""
# - --period=1h
# - --ds-namespace=kube-system
# - --ds-name=kured
@@ -73,19 +55,16 @@ spec:
# - --lock-ttl=0
# - --prometheus-url=http://prometheus.monitoring.svc.cluster.local
# - --alert-filter-regexp=^RebootRequired$
# - --alert-filter-match-only=false
# - --alert-firing-only=false
# - --reboot-sentinel=/var/run/reboot-required
# - --prefer-no-schedule-taint=""
# - --reboot-sentinel-command=""
# - --reboot-method=command
# - --reboot-signal=39
# - --slack-hook-url=https://hooks.slack.com/...
# - --slack-username=prod
# - --slack-channel=alerting
# - --notify-url="" # See also shoutrrr url format
# - --message-template-drain=Draining node %s
# - --message-template-reboot=Rebooting node %s
# - --message-template-uncordon=Node %s rebooted & uncordoned successfully!
# - --message-template-drain=Rebooting node %s
# - --blocking-pod-selector=runtime=long,cost=expensive
# - --blocking-pod-selector=name=temperamental
# - --blocking-pod-selector=...
@@ -97,6 +76,3 @@ spec:
# - --annotate-nodes=false
# - --lock-release-delay=30m
# - --log-format=text
# - --metrics-host=""
# - --metrics-port=8080
# - --concurrency=1

pkg/alerts/prometheus.go Normal file (69 lines)

@@ -0,0 +1,69 @@
package alerts
import (
"context"
"fmt"
"regexp"
"sort"
"time"
papi "github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
)
// PromClient is a wrapper around the Prometheus Client interface and implements the api
// This way, the PromClient can be instantiated with the configuration the Client needs, and
// the ability to use the methods the api has, like Query and so on.
type PromClient struct {
papi papi.Client
api v1.API
}
// NewPromClient creates a new client to the Prometheus API.
// It returns an error on any problem.
func NewPromClient(conf papi.Config) (*PromClient, error) {
promClient, err := papi.NewClient(conf)
if err != nil {
return nil, err
}
client := PromClient{papi: promClient, api: v1.NewAPI(promClient)}
return &client, nil
}
// ActiveAlerts is a method of type PromClient; it returns a list of names of active alerts
// (e.g. pending or firing), filtered by the supplied regexp or by the includeLabels query.
// Filtering by regexp means that when the regexp matches the alert name, the alert is excluded from the
// block-list and will NOT block rebooting. Querying by includeLabel means that
// if the query finds an alert, it is added to the block-list and WILL block rebooting.
func (p *PromClient) ActiveAlerts(filter *regexp.Regexp, firingOnly bool) ([]string, error) {
// get all alerts from prometheus
value, _, err := p.api.Query(context.Background(), "ALERTS", time.Now())
if err != nil {
return nil, err
}
if value.Type() == model.ValVector {
if vector, ok := value.(model.Vector); ok {
activeAlertSet := make(map[string]bool)
for _, sample := range vector {
if alertName, isAlert := sample.Metric[model.AlertNameLabel]; isAlert && sample.Value != 0 {
if (filter == nil || !filter.MatchString(string(alertName))) && (!firingOnly || sample.Metric["alertstate"] == "firing") {
activeAlertSet[string(alertName)] = true
}
}
}
var activeAlerts []string
for activeAlert := range activeAlertSet {
activeAlerts = append(activeAlerts, activeAlert)
}
sort.Strings(activeAlerts)
return activeAlerts, nil
}
}
return nil, fmt.Errorf("unexpected value type: %v", value)
}
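// Example (sketch) of probing a Prometheus instance for active alerts while
// ignoring alerts whose names match a filter; the address and filter are made
// up, and error handling is elided:
//
//	client, err := NewPromClient(papi.Config{Address: "http://prometheus:9090"})
//	// handle err ...
//	active, err := client.ActiveAlerts(regexp.MustCompile("^RebootRequired$"), true)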


@@ -0,0 +1,141 @@
package alerts
import (
"log"
"net/http"
"net/http/httptest"
"regexp"
"testing"
"github.com/prometheus/client_golang/api"
"github.com/stretchr/testify/assert"
)
type MockResponse struct {
StatusCode int
Body []byte
}
// MockServerProperties ties a mock response to a url and a method
type MockServerProperties struct {
URI string
HTTPMethod string
Response MockResponse
}
// NewMockServer sets up a new MockServer with the given properties and starts the server.
func NewMockServer(props ...MockServerProperties) *httptest.Server {
handler := http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
for _, proc := range props {
_, err := w.Write(proc.Response.Body)
if err != nil {
log.Fatal(err)
}
}
})
return httptest.NewServer(handler)
}
func TestActiveAlerts(t *testing.T) {
responsebody := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"ALERTS","alertname":"GatekeeperViolations","alertstate":"firing","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"PodCrashing-dev","alertstate":"firing","container":"deployment","instance":"1.2.3.4:8080","job":"kube-state-metrics","namespace":"dev","pod":"dev-deployment-78dcbmf25v","severity":"critical","team":"dev"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"PodRestart-dev","alertstate":"firing","container":"deployment","instance":"1.2.3.4:1234","job":"kube-state-metrics","namespace":"qa","pod":"qa-job-deployment-78dcbmf25v","severity":"warning","team":"qa"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"PrometheusTargetDown","alertstate":"firing","job":"kubernetes-pods","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"ScheduledRebootFailing","alertstate":"pending","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]}]}}`
addr := "http://localhost:10001"
for _, tc := range []struct {
it string
rFilter string
respBody string
aName string
wantN int
firingOnly bool
}{
{
it: "should return no active alerts",
respBody: responsebody,
rFilter: "",
wantN: 0,
firingOnly: false,
},
{
it: "should return a subset of all alerts",
respBody: responsebody,
rFilter: "Pod",
wantN: 3,
firingOnly: false,
},
{
it: "should return all active alerts by regex",
respBody: responsebody,
rFilter: "*",
wantN: 5,
firingOnly: false,
},
{
it: "should return all active alerts by regex filter",
respBody: responsebody,
rFilter: "*",
wantN: 5,
firingOnly: false,
},
{
it: "should return only firing alerts if firingOnly is true",
respBody: responsebody,
rFilter: "*",
wantN: 4,
firingOnly: true,
},
{
it: "should return ScheduledRebootFailing active alerts",
respBody: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"ALERTS","alertname":"ScheduledRebootFailing","alertstate":"pending","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]}]}}`,
aName: "ScheduledRebootFailing",
rFilter: "*",
wantN: 1,
firingOnly: false,
},
{
it: "should not return an active alert if RebootRequired is firing (regex filter)",
respBody: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"ALERTS","alertname":"RebootRequired","alertstate":"pending","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]}]}}`,
rFilter: "RebootRequired",
wantN: 0,
firingOnly: false,
},
} {
// Start mockServer
mockServer := NewMockServer(MockServerProperties{
URI: addr,
HTTPMethod: http.MethodPost,
Response: MockResponse{
Body: []byte(tc.respBody),
},
})
// Close mockServer after all connections are gone
defer mockServer.Close()
t.Run(tc.it, func(t *testing.T) {
// regex filter
regex, _ := regexp.Compile(tc.rFilter)
// instantiate the prometheus client with the mockserver-address
p, err := NewPromClient(api.Config{Address: mockServer.URL})
if err != nil {
log.Fatal(err)
}
result, err := p.ActiveAlerts(regex, tc.firingOnly)
if err != nil {
log.Fatal(err)
}
// assert
assert.Equal(t, tc.wantN, len(result), "expected amount of alerts %v, got %v", tc.wantN, len(result))
if tc.aName != "" {
assert.Equal(t, tc.aName, result[0], "expected active alert %v, got %v", tc.aName, result[0])
}
})
}
}


@@ -1,21 +0,0 @@
// Package blockers provides interfaces and implementations for determining
// whether a system should be prevented from rebooting.
// You can use this package if you fork Kured's main loop.
package blockers
// RebootBlocked checks that a single block Checker
// will block the reboot or not.
func RebootBlocked(blockers ...RebootBlocker) bool {
for _, blocker := range blockers {
if blocker.IsBlocked() {
return true
}
}
return false
}
// RebootBlocker is the interface implemented by types
// whose instances decide whether a reboot should be blocked
type RebootBlocker interface {
IsBlocked() bool
}
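// A fork of kured's main loop can supply its own blocker. A minimal sketch
// (hypothetical type, not part of kured) that blocks during a maintenance freeze:
//
//	type freezeBlocker struct{ frozen bool }
//
//	func (f freezeBlocker) IsBlocked() bool { return f.frozen }
//
// RebootBlocked(freezeBlocker{frozen: true}) then reports true.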


@@ -1,65 +0,0 @@
package blockers
import (
papi "github.com/prometheus/client_golang/api"
"testing"
)
type BlockingChecker struct {
blocking bool
}
func (fbc BlockingChecker) IsBlocked() bool {
return fbc.blocking
}
func Test_rebootBlocked(t *testing.T) {
noCheckers := []RebootBlocker{}
nonblockingChecker := BlockingChecker{blocking: false}
blockingChecker := BlockingChecker{blocking: true}
// Instantiate a prometheusClient with a broken_url
brokenPrometheusClient := NewPrometheusBlockingChecker(papi.Config{Address: "broken_url"}, nil, false, false)
type args struct {
blockers []RebootBlocker
}
tests := []struct {
name string
args args
want bool
}{
{
name: "Do not block on no blocker defined",
args: args{blockers: noCheckers},
want: false,
},
{
name: "Ensure a blocker blocks",
args: args{blockers: []RebootBlocker{blockingChecker}},
want: true,
},
{
name: "Ensure a non-blocker doesn't block",
args: args{blockers: []RebootBlocker{nonblockingChecker}},
want: false,
},
{
name: "Ensure one blocker is enough to block",
args: args{blockers: []RebootBlocker{nonblockingChecker, blockingChecker}},
want: true,
},
{
name: "Do block on error contacting prometheus API",
args: args{blockers: []RebootBlocker{brokenPrometheusClient}},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := RebootBlocked(tt.args.blockers...); got != tt.want {
t.Errorf("rebootBlocked() = %v, want %v", got, tt.want)
}
})
}
}


@@ -1,64 +0,0 @@
package blockers
import (
"context"
"fmt"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// Compile-time checks to ensure the type implements the interface
var (
_ RebootBlocker = (*KubernetesBlockingChecker)(nil)
)
// KubernetesBlockingChecker contains info for connecting
// to k8s, and can give info about whether a reboot should be blocked
type KubernetesBlockingChecker struct {
// client used to contact kubernetes API
client *kubernetes.Clientset
nodeName string
// list used to filter pods (podSelector)
filter []string
}
// NewKubernetesBlockingChecker creates a new KubernetesBlockingChecker using the provided Kubernetes client,
// node name, and pod selectors.
func NewKubernetesBlockingChecker(client *kubernetes.Clientset, nodename string, podSelectors []string) *KubernetesBlockingChecker {
return &KubernetesBlockingChecker{
client: client,
nodeName: nodename,
filter: podSelectors,
}
}
// IsBlocked for the KubernetesBlockingChecker checks whether any matching pod on the node is preventing
// the reboot. It will warn in the logs about blocking, but does not return an error.
func (kb KubernetesBlockingChecker) IsBlocked() bool {
fieldSelector := fmt.Sprintf("spec.nodeName=%s,status.phase!=Succeeded,status.phase!=Failed,status.phase!=Unknown", kb.nodeName)
for _, labelSelector := range kb.filter {
podList, err := kb.client.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
LabelSelector: labelSelector,
FieldSelector: fieldSelector,
Limit: 10})
if err != nil {
log.Warnf("Reboot blocked: pod query error: %v", err)
return true
}
if len(podList.Items) > 0 {
podNames := make([]string, 0, len(podList.Items))
for _, pod := range podList.Items {
podNames = append(podNames, pod.Name)
}
if len(podList.Continue) > 0 {
podNames = append(podNames, "...")
}
log.Warnf("Reboot blocked: matching pods: %v", podNames)
return true
}
}
return false
}
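// Example (sketch): block reboots while any pod labelled app=database runs on
// this node; the clientset construction is elided and the names are made up:
//
//	blocker := NewKubernetesBlockingChecker(client, "node-1", []string{"app=database"})
//	if blocker.IsBlocked() {
//		// postpone the reboot
//	}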


@@ -1,121 +0,0 @@
package blockers
import (
"context"
"fmt"
"regexp"
"sort"
"time"
papi "github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
log "github.com/sirupsen/logrus"
)
// Compile-time checks to ensure the type implements the interface
var (
_ RebootBlocker = (*PrometheusBlockingChecker)(nil)
)
// PrometheusBlockingChecker contains info for connecting
// to prometheus, and can give info about whether a reboot should be blocked
type PrometheusBlockingChecker struct {
promConfig papi.Config
// regexp used to get alerts
filter *regexp.Regexp
// bool to indicate if only firing alerts should be considered
firingOnly bool
// bool to indicate that we're only blocking on alerts which match the filter
filterMatchOnly bool
// storing the promClient
promClient papi.Client
}
// NewPrometheusBlockingChecker creates a new PrometheusBlockingChecker using the given
// Prometheus API config, alert filter, and filtering options.
func NewPrometheusBlockingChecker(config papi.Config, alertFilter *regexp.Regexp, firingOnly bool, filterMatchOnly bool) PrometheusBlockingChecker {
promClient, _ := papi.NewClient(config)
return PrometheusBlockingChecker{
promConfig: config,
filter: alertFilter,
firingOnly: firingOnly,
filterMatchOnly: filterMatchOnly,
promClient: promClient,
}
}
// IsBlocked for the prometheus will check if there are active alerts matching
// the arguments given into the PrometheusBlockingChecker which would actively
// block the reboot.
// As of today, no blocker information is shared as a return of the method,
// and the information is simply logged.
func (pb PrometheusBlockingChecker) IsBlocked() bool {
alertNames, err := pb.ActiveAlerts()
if err != nil {
log.Warnf("Reboot blocked: prometheus query error: %v", err)
return true
}
count := len(alertNames)
if count > 10 {
alertNames = append(alertNames[:10], "...")
}
if count > 0 {
log.Warnf("Reboot blocked: %d active alerts: %v", count, alertNames)
return true
}
return false
}
// MetricLabel is used to give a fancier name
// than the type to the label for rebootBlockedCounter
func (pb PrometheusBlockingChecker) MetricLabel() string {
return "prometheus"
}
// ActiveAlerts is a method of PrometheusBlockingChecker; it returns a list of names of active alerts
// (e.g. pending or firing), filtered by the supplied regexp or by the includeLabels query.
// Filtering by regexp means that when the regexp matches the alert name, the alert is excluded from the
// block-list and will NOT block rebooting. Querying by includeLabel means that
// if the query finds an alert, it is added to the block-list, and it WILL block rebooting.
func (pb PrometheusBlockingChecker) ActiveAlerts() ([]string, error) {
api := v1.NewAPI(pb.promClient)
// get all alerts from prometheus
value, _, err := api.Query(context.Background(), "ALERTS", time.Now())
if err != nil {
return nil, err
}
if value.Type() == model.ValVector {
if vector, ok := value.(model.Vector); ok {
activeAlertSet := make(map[string]bool)
for _, sample := range vector {
if alertName, isAlert := sample.Metric[model.AlertNameLabel]; isAlert && sample.Value != 0 {
if matchesRegex(pb.filter, string(alertName), pb.filterMatchOnly) && (!pb.firingOnly || sample.Metric["alertstate"] == "firing") {
activeAlertSet[string(alertName)] = true
}
}
}
var activeAlerts []string
for activeAlert := range activeAlertSet {
activeAlerts = append(activeAlerts, activeAlert)
}
sort.Strings(activeAlerts)
return activeAlerts, nil
}
}
return nil, fmt.Errorf("unexpected value type %v", value)
}
func matchesRegex(filter *regexp.Regexp, alertName string, filterMatchOnly bool) bool {
if filter == nil {
return true
}
return filter.MatchString(alertName) == filterMatchOnly
}
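// Worked examples of the semantics above, with filter = ^RebootRequired$:
//
//	matchesRegex(filter, "RebootRequired", false) // false: matching alerts are filtered out and do not block
//	matchesRegex(filter, "DiskFull", false)       // true:  any non-matching alert blocks
//	matchesRegex(filter, "RebootRequired", true)  // true:  with filterMatchOnly, only matching alerts block
//	matchesRegex(filter, "DiskFull", true)        // false: non-matching alerts are ignored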


@@ -1,163 +0,0 @@
package blockers
import (
"log"
"net/http"
"net/http/httptest"
"regexp"
"testing"
"github.com/prometheus/client_golang/api"
"github.com/stretchr/testify/assert"
)
type MockResponse struct {
StatusCode int
Body []byte
}
// MockServerProperties ties a mock response to a url and a method
type MockServerProperties struct {
URI string
HTTPMethod string
Response MockResponse
}
// NewMockServer sets up a new MockServer with the given properties and starts the server.
func NewMockServer(props ...MockServerProperties) *httptest.Server {
handler := http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
for _, proc := range props {
_, err := w.Write(proc.Response.Body)
if err != nil {
log.Fatal(err)
}
}
})
return httptest.NewServer(handler)
}
func TestActiveAlerts(t *testing.T) {
responsebody := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"ALERTS","alertname":"GatekeeperViolations","alertstate":"firing","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"PodCrashing-dev","alertstate":"firing","container":"deployment","instance":"1.2.3.4:8080","job":"kube-state-metrics","namespace":"dev","pod":"dev-deployment-78dcbmf25v","severity":"critical","team":"dev"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"PodRestart-dev","alertstate":"firing","container":"deployment","instance":"1.2.3.4:1234","job":"kube-state-metrics","namespace":"qa","pod":"qa-job-deployment-78dcbmf25v","severity":"warning","team":"qa"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"PrometheusTargetDown","alertstate":"firing","job":"kubernetes-pods","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"ScheduledRebootFailing","alertstate":"pending","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]}]}}`
addr := "http://localhost:10001"
for _, tc := range []struct {
it string
rFilter string
respBody string
aName string
wantN int
firingOnly bool
filterMatchOnly bool
}{
{
it: "should return no active alerts",
respBody: responsebody,
rFilter: "",
wantN: 0,
firingOnly: false,
filterMatchOnly: false,
},
{
it: "should return a subset of all alerts",
respBody: responsebody,
rFilter: "Pod",
wantN: 3,
firingOnly: false,
filterMatchOnly: false,
},
{
it: "should return a subset of all alerts",
respBody: responsebody,
rFilter: "Gatekeeper",
wantN: 1,
firingOnly: false,
filterMatchOnly: true,
},
{
it: "should return all active alerts by regex",
respBody: responsebody,
rFilter: "*",
wantN: 5,
firingOnly: false,
filterMatchOnly: false,
},
{
it: "should return all active alerts by regex filter",
respBody: responsebody,
rFilter: "*",
wantN: 5,
firingOnly: false,
filterMatchOnly: false,
},
{
it: "should return only firing alerts if firingOnly is true",
respBody: responsebody,
rFilter: "*",
wantN: 4,
firingOnly: true,
filterMatchOnly: false,
},
{
it: "should return ScheduledRebootFailing active alerts",
respBody: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"ALERTS","alertname":"ScheduledRebootFailing","alertstate":"pending","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]}]}}`,
aName: "ScheduledRebootFailing",
rFilter: "*",
wantN: 1,
firingOnly: false,
filterMatchOnly: false,
},
{
it: "should not return an active alert if RebootRequired is firing (regex filter)",
respBody: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"ALERTS","alertname":"RebootRequired","alertstate":"pending","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]}]}}`,
rFilter: "RebootRequired",
wantN: 0,
firingOnly: false,
filterMatchOnly: false,
},
{
it: "should not return an active alert if RebootRequired is firing (regex filter)",
respBody: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"ALERTS","alertname":"RebootRequired","alertstate":"pending","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]}]}}`,
rFilter: "RebootRequired",
wantN: 1,
firingOnly: false,
filterMatchOnly: true,
},
} {
// Start mockServer
mockServer := NewMockServer(MockServerProperties{
URI: addr,
HTTPMethod: http.MethodPost,
Response: MockResponse{
Body: []byte(tc.respBody),
},
})
// Close mockServer after all connections are gone
defer mockServer.Close()
t.Run(tc.it, func(t *testing.T) {
// regex filter
regex, _ := regexp.Compile(tc.rFilter)
// instantiate the prometheus client with the mockserver-address
p := NewPrometheusBlockingChecker(api.Config{Address: mockServer.URL}, regex, tc.firingOnly, tc.filterMatchOnly)
result, err := p.ActiveAlerts()
if err != nil {
log.Fatal(err)
}
// assert
assert.Equal(t, tc.wantN, len(result), "expected amount of alerts %v, got %v", tc.wantN, len(result))
if tc.aName != "" {
assert.Equal(t, tc.aName, result[0], "expected active alert %v, got %v", tc.aName, result[0])
}
})
}
}


@@ -1,117 +0,0 @@
// Package checkers provides interfaces and implementations for determining
// whether a system reboot is required. It includes checkers based on file
// presence or custom commands, and supports privileged command execution
// in containerized environments. These checkers are used by kured to
// detect conditions that should trigger node reboots.
// You can use this package if you fork Kured's main loop.
package checkers
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"
"github.com/google/shlex"
log "github.com/sirupsen/logrus"
)
// Checker is the standard interface to use to check
// if a reboot is required. Implementations must provide a
// RebootRequired method which returns a single boolean
// indicating whether a reboot is expected or not.
type Checker interface {
RebootRequired() bool
}
// FileRebootChecker is the default reboot checker.
// It is unprivileged, and tests for the presence of a file.
type FileRebootChecker struct {
FilePath string
}
// RebootRequired checks for the file's presence.
// This needs refactoring to also return an error, instead of leaking it inside the code.
func (rc FileRebootChecker) RebootRequired() bool {
if _, err := os.Stat(rc.FilePath); err == nil {
return true
}
return false
}
// NewFileRebootChecker is the constructor for the file based reboot checker
// TODO: Add extra input validation on filePath string here
func NewFileRebootChecker(filePath string) (*FileRebootChecker, error) {
return &FileRebootChecker{
FilePath: filePath,
}, nil
}
// CommandChecker is using a custom command to check
// if a reboot is required. There are two modes of behaviour,
// if Privileged is granted, the NamespacePid is used to nsenter
// the given PID's namespace.
type CommandChecker struct {
CheckCommand []string
NamespacePid int
Privileged bool
}
// RebootRequired for CommandChecker runs a command without returning
// any eventual error. This should later be refactored to return errors,
// instead of logging them or exiting fatally here.
func (rc CommandChecker) RebootRequired() bool {
bufStdout := new(bytes.Buffer)
bufStderr := new(bytes.Buffer)
// #nosec G204 -- CheckCommand is controlled and validated internally
cmd := exec.Command(rc.CheckCommand[0], rc.CheckCommand[1:]...)
cmd.Stdout = bufStdout
cmd.Stderr = bufStderr
if err := cmd.Run(); err != nil {
switch err := err.(type) {
case *exec.ExitError:
// We assume a non-zero exit code means 'reboot not required', but of course
// the user could have misconfigured the sentinel command or something else
// went wrong during its execution. In that case, not entering a reboot loop
// is the right thing to do, and we are logging stdout/stderr of the command
// so it should be obvious what is wrong.
if cmd.ProcessState.ExitCode() != 1 {
log.Warn(fmt.Sprintf("sentinel command ended with unexpected exit code: %v", cmd.ProcessState.ExitCode()), "cmd", strings.Join(cmd.Args, " "), "stdout", bufStdout.String(), "stderr", bufStderr.String())
}
return false
default:
// Something was grossly misconfigured, such as the command path being wrong.
log.Fatal(fmt.Sprintf("Error invoking sentinel command: %v", err), "cmd", strings.Join(cmd.Args, " "), "stdout", bufStdout.String(), "stderr", bufStderr.String())
}
}
log.Info("checking if reboot is required", "cmd", strings.Join(cmd.Args, " "), "stdout", bufStdout.String(), "stderr", bufStderr.String())
return true
}
// NewCommandChecker is the constructor for CommandChecker, and by default
// runs new commands in a privileged fashion.
// Privileged means wrapping the command with nsenter, which
// allows running a command from systemd's namespace, for example (pid 1).
// This relies on hostPID:true and privileged:true to enter the host mount namespace.
// For info, Rancher-based systems need a different pid, which should be user supplied
// until we have a better discovery mechanism.
func NewCommandChecker(sentinelCommand string, pid int, privileged bool) (*CommandChecker, error) {
var cmd []string
if privileged {
cmd = append(cmd, "/usr/bin/nsenter", fmt.Sprintf("-m/proc/%d/ns/mnt", pid), "--")
}
parsedCommand, err := shlex.Split(sentinelCommand)
if err != nil {
return nil, fmt.Errorf("error parsing provided sentinel command: %v", err)
}
cmd = append(cmd, parsedCommand...)
return &CommandChecker{
CheckCommand: cmd,
NamespacePid: pid,
Privileged: privileged,
}, nil
}
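// Example (sketch): constructing both checker flavours; the command and the
// sentinel path are illustrative:
//
//	fileChecker, _ := NewFileRebootChecker("/var/run/reboot-required")
//	cmdChecker, _ := NewCommandChecker("/usr/bin/needs-restarting -r", 1, true)
//	// cmdChecker.CheckCommand is now:
//	// ["/usr/bin/nsenter", "-m/proc/1/ns/mnt", "--", "/usr/bin/needs-restarting", "-r"]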


@@ -1,87 +0,0 @@
package checkers
import (
log "github.com/sirupsen/logrus"
"reflect"
"testing"
)
func Test_nsEntering(t *testing.T) {
type args struct {
pid int
command string
privileged bool
}
tests := []struct {
name string
args args
want []string
}{
{
name: "Ensure command will run with nsenter",
args: args{pid: 1, command: "ls -Fal", privileged: true},
want: []string{"/usr/bin/nsenter", "-m/proc/1/ns/mnt", "--", "ls", "-Fal"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cc, _ := NewCommandChecker(tt.args.command, tt.args.pid, tt.args.privileged)
if !reflect.DeepEqual(cc.CheckCommand, tt.want) {
t.Errorf("command parsed as %v, want %v", cc.CheckCommand, tt.want)
}
})
}
}
func Test_rebootRequired(t *testing.T) {
type args struct {
sentinelCommand []string
}
tests := []struct {
name string
args args
want bool
fatals bool
}{
{
name: "Ensure rc = 0 means reboot required",
args: args{
sentinelCommand: []string{"true"},
},
want: true,
fatals: false,
},
{
name: "Ensure rc != 0 means reboot NOT required",
args: args{
sentinelCommand: []string{"false"},
},
want: false,
fatals: false,
},
{
name: "Ensure a wrong command fatals",
args: args{
sentinelCommand: []string{"./babar"},
},
want: true,
fatals: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
defer func() { log.StandardLogger().ExitFunc = nil }()
fatal := false
log.StandardLogger().ExitFunc = func(int) { fatal = true }
a := CommandChecker{CheckCommand: tt.args.sentinelCommand, NamespacePid: 1, Privileged: false}
if got := a.RebootRequired(); got != tt.want {
t.Errorf("rebootRequired() = %v, want %v", got, tt.want)
}
if tt.fatals != fatal {
t.Errorf("fatal flag is %v, want fatal %v", fatal, tt.fatals)
}
})
}
}

View File

@@ -1,18 +1,11 @@
// Package daemonsetlock provides mechanisms for leader election and locking
// using Kubernetes DaemonSets. It enables distributed coordination of operations
// (such as reboots) by ensuring only one node acts as the leader at any time,
// leveraging Kubernetes primitives for safe, atomic locking in clusters.
package daemonsetlock
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,25 +18,6 @@ const (
k8sAPICallRetryTimeout = 5 * time.Minute // How long to wait until we determine that the k8s API is definitively unavailable
)
// Lock defines the interface for acquiring, releasing, and checking
// the status of a reboot coordination lock.
type Lock interface {
Acquire(NodeMeta) (bool, string, error)
Release() error
Holding() (bool, LockAnnotationValue, error)
}
// GenericLock holds the configuration for lock TTL and the delay before releasing it.
type GenericLock struct {
TTL time.Duration
releaseDelay time.Duration
}
// NodeMeta contains metadata about a node relevant to scheduling decisions.
type NodeMeta struct {
Unschedulable bool `json:"unschedulable"`
}
// DaemonSetLock holds all necessary information to do actions
// on the kured ds which holds lock info through annotations.
type DaemonSetLock struct {
@@ -54,98 +28,29 @@ type DaemonSetLock struct {
annotation string
}
// DaemonSetSingleLock holds all necessary information to do actions
// on the kured ds which holds lock info through annotations.
type DaemonSetSingleLock struct {
GenericLock
DaemonSetLock
}
// DaemonSetMultiLock holds all necessary information to do actions
// on the kured ds which holds lock info through annotations, valid
// for multiple nodes
type DaemonSetMultiLock struct {
GenericLock
DaemonSetLock
maxOwners int
}
// LockAnnotationValue contains the lock data,
// which allows persistence across reboots, particularly recording whether the
// node was already unschedulable before the kured reboot.
// To be modified when using another type of lock storage.
type LockAnnotationValue struct {
type lockAnnotationValue struct {
NodeID string `json:"nodeID"`
Metadata NodeMeta `json:"metadata,omitempty"`
Metadata interface{} `json:"metadata,omitempty"`
Created time.Time `json:"created"`
TTL time.Duration `json:"TTL"`
}
type multiLockAnnotationValue struct {
MaxOwners int `json:"maxOwners"`
LockAnnotations []LockAnnotationValue `json:"locks"`
}
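To make the stored format concrete, here is an illustrative in-package sketch (using the main-branch types above; node name and TTL are hypothetical, imports of encoding/json, fmt and time elided) of the JSON that ends up in the daemonset annotation:

value := LockAnnotationValue{
    NodeID:   "worker-1", // hypothetical node name
    Metadata: NodeMeta{Unschedulable: false},
    Created:  time.Now().UTC(),
    TTL:      time.Hour,
}
b, _ := json.Marshal(&value)
fmt.Println(string(b))
// Prints something like (TTL is a time.Duration, serialized as nanoseconds):
// {"nodeID":"worker-1","metadata":{"unschedulable":false},"created":"2022-01-14T09:39:48Z","TTL":3600000000000}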
// New creates a daemonsetLock object containing the necessary data for follow up k8s requests
func New(client *kubernetes.Clientset, nodeID, namespace, name, annotation string, TTL time.Duration, concurrency int, lockReleaseDelay time.Duration) Lock {
if concurrency > 1 {
return &DaemonSetMultiLock{
GenericLock: GenericLock{
TTL: TTL,
releaseDelay: lockReleaseDelay,
},
DaemonSetLock: DaemonSetLock{
client: client,
nodeID: nodeID,
namespace: namespace,
name: name,
annotation: annotation,
},
maxOwners: concurrency,
}
}
return &DaemonSetSingleLock{
GenericLock: GenericLock{
TTL: TTL,
releaseDelay: lockReleaseDelay,
},
DaemonSetLock: DaemonSetLock{
client: client,
nodeID: nodeID,
namespace: namespace,
name: name,
annotation: annotation,
},
}
}
// GetDaemonSet returns the named DaemonSet resource from the DaemonSetLock's configured client
func (dsl *DaemonSetLock) GetDaemonSet(sleep, timeout time.Duration) (*v1.DaemonSet, error) {
var ds *v1.DaemonSet
var lastError error
err := wait.PollUntilContextTimeout(context.Background(), sleep, timeout, true, func(ctx context.Context) (bool, error) {
if ds, lastError = dsl.client.AppsV1().DaemonSets(dsl.namespace).Get(ctx, dsl.name, metav1.GetOptions{}); lastError != nil {
return false, nil
}
return true, nil
})
if err != nil {
return nil, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %v", dsl.name, dsl.namespace, lastError)
}
return ds, nil
func New(client *kubernetes.Clientset, nodeID, namespace, name, annotation string) *DaemonSetLock {
return &DaemonSetLock{client, nodeID, namespace, name, annotation}
}
// Acquire attempts to annotate the kured daemonset with lock info from instantiated DaemonSetLock using client-go
func (dsl *DaemonSetSingleLock) Acquire(nodeMetadata NodeMeta) (bool, string, error) {
func (dsl *DaemonSetLock) Acquire(metadata interface{}, TTL time.Duration) (bool, string, error) {
for {
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
if err != nil {
return false, "", fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
}
valueString, exists := ds.Annotations[dsl.annotation]
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
if exists {
value := LockAnnotationValue{}
value := lockAnnotationValue{}
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
return false, "", err
}
@@ -155,22 +60,15 @@ func (dsl *DaemonSetSingleLock) Acquire(nodeMetadata NodeMeta) (bool, string, er
}
}
if ds.Annotations == nil {
ds.Annotations = make(map[string]string)
if ds.ObjectMeta.Annotations == nil {
ds.ObjectMeta.Annotations = make(map[string]string)
}
value := LockAnnotationValue{
NodeID: dsl.nodeID,
Metadata: nodeMetadata,
Created: time.Now().UTC(),
TTL: dsl.TTL,
}
value := lockAnnotationValue{NodeID: dsl.nodeID, Metadata: metadata, Created: time.Now().UTC(), TTL: TTL}
valueBytes, err := json.Marshal(&value)
if err != nil {
return false, "", err
}
ds.Annotations[dsl.annotation] = string(valueBytes)
ds.ObjectMeta.Annotations[dsl.annotation] = string(valueBytes)
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
if err != nil {
@@ -178,64 +76,59 @@ func (dsl *DaemonSetSingleLock) Acquire(nodeMetadata NodeMeta) (bool, string, er
// Something else updated the resource between us reading and writing - try again soon
time.Sleep(time.Second)
continue
} else {
return false, "", err
}
return false, "", err
}
return true, dsl.nodeID, nil
}
}
// Holding checks if the current node still holds the lock based on the DaemonSet annotation.
func (dsl *DaemonSetSingleLock) Holding() (bool, LockAnnotationValue, error) {
var lockData LockAnnotationValue
// Test attempts to check the kured daemonset lock status (existence, expiry) from instantiated DaemonSetLock using client-go
func (dsl *DaemonSetLock) Test(metadata interface{}) (bool, error) {
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
if err != nil {
return false, lockData, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
return false, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
}
valueString, exists := ds.Annotations[dsl.annotation]
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
if exists {
value := LockAnnotationValue{}
value := lockAnnotationValue{Metadata: metadata}
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
return false, lockData, err
return false, err
}
if !ttlExpired(value.Created, value.TTL) {
return value.NodeID == dsl.nodeID, value, nil
return value.NodeID == dsl.nodeID, nil
}
}
return false, lockData, nil
return false, nil
}
// Release attempts to remove the lock data from the kured ds annotations using client-go
func (dsl *DaemonSetSingleLock) Release() error {
if dsl.releaseDelay > 0 {
log.Infof("Waiting %v before releasing lock", dsl.releaseDelay)
time.Sleep(dsl.releaseDelay)
}
func (dsl *DaemonSetLock) Release() error {
for {
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
if err != nil {
return fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
}
valueString, exists := ds.Annotations[dsl.annotation]
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
if exists {
value := LockAnnotationValue{}
value := lockAnnotationValue{}
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
return err
}
if value.NodeID != dsl.nodeID {
return fmt.Errorf("not lock holder: %v", value.NodeID)
return fmt.Errorf("Not lock holder: %v", value.NodeID)
}
} else {
return fmt.Errorf("lock not held")
return fmt.Errorf("Lock not held")
}
delete(ds.Annotations, dsl.annotation)
delete(ds.ObjectMeta.Annotations, dsl.annotation)
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
if err != nil {
@@ -243,179 +136,35 @@ func (dsl *DaemonSetSingleLock) Release() error {
// Something else updated the resource between us reading and writing - try again soon
time.Sleep(time.Second)
continue
} else {
return err
}
return err
}
return nil
}
}
// GetDaemonSet returns the named DaemonSet resource from the DaemonSetLock's configured client
func (dsl *DaemonSetLock) GetDaemonSet(sleep, timeout time.Duration) (*v1.DaemonSet, error) {
var ds *v1.DaemonSet
var lastError error
err := wait.PollImmediate(sleep, timeout, func() (bool, error) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
if ds, lastError = dsl.client.AppsV1().DaemonSets(dsl.namespace).Get(ctx, dsl.name, metav1.GetOptions{}); lastError != nil {
return false, nil
}
return true, nil
})
if err != nil {
return nil, fmt.Errorf("Timed out trying to get daemonset %s in namespace %s: %v", dsl.name, dsl.namespace, lastError)
}
return ds, nil
}
func ttlExpired(created time.Time, ttl time.Duration) bool {
if ttl > 0 && time.Since(created) >= ttl {
return true
}
return false
}
func nodeIDsFromMultiLock(annotation multiLockAnnotationValue) []string {
nodeIDs := make([]string, 0, len(annotation.LockAnnotations))
for _, nodeLock := range annotation.LockAnnotations {
nodeIDs = append(nodeIDs, nodeLock.NodeID)
}
return nodeIDs
}
func (dsl *DaemonSetLock) canAcquireMultiple(annotation multiLockAnnotationValue, metadata NodeMeta, TTL time.Duration, maxOwners int) (bool, multiLockAnnotationValue) {
newAnnotation := multiLockAnnotationValue{MaxOwners: maxOwners}
freeSpace := false
if annotation.LockAnnotations == nil || len(annotation.LockAnnotations) < maxOwners {
freeSpace = true
newAnnotation.LockAnnotations = annotation.LockAnnotations
} else {
for _, nodeLock := range annotation.LockAnnotations {
if ttlExpired(nodeLock.Created, nodeLock.TTL) {
freeSpace = true
continue
}
newAnnotation.LockAnnotations = append(
newAnnotation.LockAnnotations,
nodeLock,
)
}
}
if freeSpace {
newAnnotation.LockAnnotations = append(
newAnnotation.LockAnnotations,
LockAnnotationValue{
NodeID: dsl.nodeID,
Metadata: metadata,
Created: time.Now().UTC(),
TTL: TTL,
},
)
return true, newAnnotation
}
return false, multiLockAnnotationValue{}
}
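For concurrency greater than one, the annotation instead stores a multiLockAnnotationValue; an illustrative in-package sketch of its serialized shape (values hypothetical, imports elided):

b, _ := json.Marshal(&multiLockAnnotationValue{
    MaxOwners: 2,
    LockAnnotations: []LockAnnotationValue{
        {NodeID: "worker-1", Created: time.Now().UTC(), TTL: time.Hour},
    },
})
fmt.Println(string(b))
// {"maxOwners":2,"locks":[{"nodeID":"worker-1","metadata":{"unschedulable":false},"created":"...","TTL":3600000000000}]}

canAcquireMultiple rebuilds this list on every attempt, dropping expired entries, so an expired holder frees a slot for the requesting node.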
// Acquire creates and annotates the daemonset with a multiple owner lock
func (dsl *DaemonSetMultiLock) Acquire(nodeMetaData NodeMeta) (bool, string, error) {
for {
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
if err != nil {
return false, "", fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
}
annotation := multiLockAnnotationValue{}
valueString, exists := ds.Annotations[dsl.annotation]
if exists {
if err := json.Unmarshal([]byte(valueString), &annotation); err != nil {
return false, "", fmt.Errorf("error getting multi lock: %w", err)
}
}
lockPossible, newAnnotation := dsl.canAcquireMultiple(annotation, nodeMetaData, dsl.TTL, dsl.maxOwners)
if !lockPossible {
return false, strings.Join(nodeIDsFromMultiLock(newAnnotation), ","), nil
}
if ds.Annotations == nil {
ds.Annotations = make(map[string]string)
}
newAnnotationBytes, err := json.Marshal(&newAnnotation)
if err != nil {
return false, "", fmt.Errorf("error marshalling new annotation lock: %w", err)
}
ds.Annotations[dsl.annotation] = string(newAnnotationBytes)
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.Background(), ds, metav1.UpdateOptions{})
if err != nil {
if se, ok := err.(*errors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
time.Sleep(time.Second)
continue
}
return false, "", fmt.Errorf("error updating daemonset with multi lock: %w", err)
}
return true, strings.Join(nodeIDsFromMultiLock(newAnnotation), ","), nil
}
}
// Holding checks whether the current node is holding a valid lock for the DaemonSetMultiLock.
func (dsl *DaemonSetMultiLock) Holding() (bool, LockAnnotationValue, error) {
var lockdata LockAnnotationValue
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
if err != nil {
return false, lockdata, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
}
valueString, exists := ds.Annotations[dsl.annotation]
if exists {
value := multiLockAnnotationValue{}
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
return false, lockdata, err
}
for _, nodeLock := range value.LockAnnotations {
if nodeLock.NodeID == dsl.nodeID && !ttlExpired(nodeLock.Created, nodeLock.TTL) {
return true, nodeLock, nil
}
}
}
return false, lockdata, nil
}
// Release attempts to remove the lock data for a single node from the multi node annotation
func (dsl *DaemonSetMultiLock) Release() error {
if dsl.releaseDelay > 0 {
log.Infof("Waiting %v before releasing lock", dsl.releaseDelay)
time.Sleep(dsl.releaseDelay)
}
for {
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
if err != nil {
return fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
}
valueString, exists := ds.Annotations[dsl.annotation]
modified := false
value := multiLockAnnotationValue{}
if exists {
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
return err
}
for idx, nodeLock := range value.LockAnnotations {
if nodeLock.NodeID == dsl.nodeID {
value.LockAnnotations = append(value.LockAnnotations[:idx], value.LockAnnotations[idx+1:]...)
modified = true
break
}
}
}
if !exists || !modified {
return fmt.Errorf("Lock not held")
}
newAnnotationBytes, err := json.Marshal(value)
if err != nil {
return fmt.Errorf("error marshalling new annotation on release: %v", err)
}
ds.Annotations[dsl.annotation] = string(newAnnotationBytes)
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
if err != nil {
if se, ok := err.(*errors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
// Something else updated the resource between us reading and writing - try again soon
time.Sleep(time.Second)
continue
}
return err
}
return nil
}
}

View File

@@ -1,8 +1,6 @@
package daemonsetlock
import (
"reflect"
"sort"
"testing"
"time"
)
@@ -28,181 +26,3 @@ func TestTtlExpired(t *testing.T) {
}
}
}
func multiLockAnnotationsAreEqualByNodes(src, dst multiLockAnnotationValue) bool {
srcNodes := []string{}
for _, srcLock := range src.LockAnnotations {
srcNodes = append(srcNodes, srcLock.NodeID)
}
sort.Strings(srcNodes)
dstNodes := []string{}
for _, dstLock := range dst.LockAnnotations {
dstNodes = append(dstNodes, dstLock.NodeID)
}
sort.Strings(dstNodes)
return reflect.DeepEqual(srcNodes, dstNodes)
}
func TestCanAcquireMultiple(t *testing.T) {
node1Name := "n1"
node2Name := "n2"
node3Name := "n3"
testCases := []struct {
name string
daemonSetLock DaemonSetLock
maxOwners int
current multiLockAnnotationValue
desired multiLockAnnotationValue
lockPossible bool
}{
{
name: "empty_lock",
daemonSetLock: DaemonSetLock{
nodeID: node1Name,
},
maxOwners: 2,
current: multiLockAnnotationValue{},
desired: multiLockAnnotationValue{
MaxOwners: 2,
LockAnnotations: []LockAnnotationValue{
{NodeID: node1Name},
},
},
lockPossible: true,
},
{
name: "partial_lock",
daemonSetLock: DaemonSetLock{
nodeID: node1Name,
},
maxOwners: 2,
current: multiLockAnnotationValue{
MaxOwners: 2,
LockAnnotations: []LockAnnotationValue{
{NodeID: node2Name},
},
},
desired: multiLockAnnotationValue{
MaxOwners: 2,
LockAnnotations: []LockAnnotationValue{
{NodeID: node1Name},
{NodeID: node2Name},
},
},
lockPossible: true,
},
{
name: "full_lock",
daemonSetLock: DaemonSetLock{
nodeID: node1Name,
},
maxOwners: 2,
current: multiLockAnnotationValue{
MaxOwners: 2,
LockAnnotations: []LockAnnotationValue{
{
NodeID: node2Name,
Created: time.Now().UTC().Add(-1 * time.Minute),
TTL: time.Hour,
},
{
NodeID: node3Name,
Created: time.Now().UTC().Add(-1 * time.Minute),
TTL: time.Hour,
},
},
},
desired: multiLockAnnotationValue{
MaxOwners: 2,
LockAnnotations: []LockAnnotationValue{
{NodeID: node2Name},
{NodeID: node3Name},
},
},
lockPossible: false,
},
{
name: "full_with_one_expired_lock",
daemonSetLock: DaemonSetLock{
nodeID: node1Name,
},
maxOwners: 2,
current: multiLockAnnotationValue{
MaxOwners: 2,
LockAnnotations: []LockAnnotationValue{
{
NodeID: node2Name,
Created: time.Now().UTC().Add(-1 * time.Hour),
TTL: time.Minute,
},
{
NodeID: node3Name,
Created: time.Now().UTC().Add(-1 * time.Minute),
TTL: time.Hour,
},
},
},
desired: multiLockAnnotationValue{
MaxOwners: 2,
LockAnnotations: []LockAnnotationValue{
{NodeID: node1Name},
{NodeID: node3Name},
},
},
lockPossible: true,
},
{
name: "full_with_all_expired_locks",
daemonSetLock: DaemonSetLock{
nodeID: node1Name,
},
maxOwners: 2,
current: multiLockAnnotationValue{
MaxOwners: 2,
LockAnnotations: []LockAnnotationValue{
{
NodeID: node2Name,
Created: time.Now().UTC().Add(-1 * time.Hour),
TTL: time.Minute,
},
{
NodeID: node3Name,
Created: time.Now().UTC().Add(-1 * time.Hour),
TTL: time.Minute,
},
},
},
desired: multiLockAnnotationValue{
MaxOwners: 2,
LockAnnotations: []LockAnnotationValue{
{NodeID: node1Name},
},
},
lockPossible: true,
},
}
nm := NodeMeta{Unschedulable: false}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
lockPossible, actual := testCase.daemonSetLock.canAcquireMultiple(testCase.current, nm, time.Minute, testCase.maxOwners)
if lockPossible != testCase.lockPossible {
t.Fatalf(
"unexpected result for lock possible (got %t expected %t new annotation %v",
lockPossible,
testCase.lockPossible,
actual,
)
}
if lockPossible && (!multiLockAnnotationsAreEqualByNodes(actual, testCase.desired) || testCase.desired.MaxOwners != actual.MaxOwners) {
t.Fatalf(
"expected lock %v but got %v",
testCase.desired,
actual,
)
}
})
}
}

View File

@@ -1,9 +1,3 @@
// Package delaytick provides utilities for scheduling periodic events
// with an initial randomized delay. It is primarily used to delay the
// start of regular ticks, helping to avoid thundering herd problems
// when multiple nodes begin operations simultaneously.
// You can use this as a random ticker in other projects, but there is
// no guarantee that it will stay (the initial plan was to move it to internal).
package delaytick
import (
@@ -16,7 +10,6 @@ func New(s rand.Source, d time.Duration) <-chan time.Time {
c := make(chan time.Time)
go func() {
// #nosec G404 -- math/rand is used here for non-security timing jitter
random := rand.New(s)
time.Sleep(time.Duration(float64(d)/2 + float64(d)*random.Float64()))
c <- time.Now()
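Given the sleep above, the first tick fires after a uniformly distributed delay in [d/2, 3d/2); subsequent ticks follow the rest of the function, which is truncated in this diff. A minimal usage sketch, with an assumed import path:

package main

import (
    "fmt"
    "math/rand"
    "time"

    "github.com/kubereboot/kured/pkg/delaytick" // assumed import path
)

func main() {
    source := rand.NewSource(time.Now().UnixNano())
    // With d = one hour, the first tick lands between 30 and 90 minutes in.
    for t := range delaytick.New(source, time.Hour) {
        fmt.Println("tick at", t)
    }
}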

View File

@@ -1,49 +0,0 @@
package reboot
import (
"bytes"
"fmt"
"os/exec"
"strings"
"github.com/google/shlex"
log "github.com/sirupsen/logrus"
)
// CommandRebooter holds context-information for a reboot with command
type CommandRebooter struct {
RebootCommand []string
}
// Reboot triggers the reboot command
func (c CommandRebooter) Reboot() error {
log.Infof("Invoking command: %s", c.RebootCommand)
bufStdout := new(bytes.Buffer)
bufStderr := new(bytes.Buffer)
cmd := exec.Command(c.RebootCommand[0], c.RebootCommand[1:]...) // #nosec G204
cmd.Stdout = bufStdout
cmd.Stderr = bufStderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("error invoking reboot command %s: %v (stdout: %v, stderr: %v)", c.RebootCommand, err, bufStdout.String(), bufStderr.String())
}
log.Info("Invoked reboot command", "cmd", strings.Join(cmd.Args, " "), "stdout", bufStdout.String(), "stderr", bufStderr.String())
return nil
}
// NewCommandRebooter is the constructor to create a CommandRebooter from a string
// that has not yet been shell-lexed. You can skip this constructor if you parse
// the command correctly yourself when instantiating a CommandRebooter instance.
func NewCommandRebooter(rebootCommand string) (*CommandRebooter, error) {
if rebootCommand == "" {
return nil, fmt.Errorf("no reboot command specified")
}
cmd := []string{"/usr/bin/nsenter", fmt.Sprintf("-m/proc/%d/ns/mnt", 1), "--"}
parsedCommand, err := shlex.Split(rebootCommand)
if err != nil {
return nil, fmt.Errorf("error %v when parsing reboot command %s", err, rebootCommand)
}
cmd = append(cmd, parsedCommand...)
return &CommandRebooter{RebootCommand: cmd}, nil
}
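A quick sketch of what the constructor produces; the reboot command here is only an example:

rebooter, err := reboot.NewCommandRebooter("systemctl reboot") // example command
if err != nil {
    panic(err) // illustration only
}
fmt.Println(rebooter.RebootCommand)
// [/usr/bin/nsenter -m/proc/1/ns/mnt -- systemctl reboot]

The nsenter prefix means the command executes in the host's mount namespace via PID 1, not inside the kured container.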

View File

@@ -1,43 +0,0 @@
package reboot
import (
"reflect"
"testing"
)
func TestNewCommandRebooter(t *testing.T) {
type args struct {
rebootCommand string
}
tests := []struct {
name string
args args
want *CommandRebooter
wantErr bool
}{
{
name: "Ensure command is nsenter wrapped",
args: args{"ls -Fal"},
want: &CommandRebooter{RebootCommand: []string{"/usr/bin/nsenter", "-m/proc/1/ns/mnt", "--", "ls", "-Fal"}},
wantErr: false,
},
{
name: "Ensure empty command is erroring",
args: args{""},
want: nil,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := NewCommandRebooter(tt.args.rebootCommand)
if (err != nil) != tt.wantErr {
t.Errorf("NewCommandRebooter() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("NewCommandRebooter() got = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -1,13 +0,0 @@
// Package reboot provides mechanisms to trigger node reboots using different
// methods, like custom commands or signals.
// Each of those includes constructors and interfaces for handling different reboot
// strategies, supporting privileged command execution via nsenter for containerized environments.
package reboot
// Rebooter is the standard interface used to execute
// the reboot once it has been deemed necessary.
// The Reboot method returns an error when the reboot
// could not be triggered.
type Rebooter interface {
Reboot() error
}
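Any type with a Reboot() error method satisfies the interface. A hypothetical dry-run implementation, useful in tests (fmt import elided):

// DryRunRebooter is a hypothetical Rebooter that only logs.
type DryRunRebooter struct{}

// Reboot prints instead of rebooting.
func (DryRunRebooter) Reboot() error {
    fmt.Println("dry-run: would reboot the node now")
    return nil
}

// Compile-time check that the interface is satisfied.
var _ Rebooter = DryRunRebooter{}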

View File

@@ -1,37 +0,0 @@
package reboot
import (
"fmt"
"os"
"syscall"
)
// SignalRebooter holds context-information for a signal reboot.
type SignalRebooter struct {
Signal int
}
// Reboot triggers the reboot signal
func (c SignalRebooter) Reboot() error {
process, err := os.FindProcess(1)
if err != nil {
return fmt.Errorf("not running on Unix: %v", err)
}
err = process.Signal(syscall.Signal(c.Signal))
// Either PID does not exist, or the signal does not work. Hoping for
// a decent enough error.
if err != nil {
return fmt.Errorf("signal of SIGRTMIN+5 failed: %v", err)
}
return nil
}
// NewSignalRebooter is the constructor which sets the signal number.
// The constructor only validates that the signal number is positive; further validation should be done in a later commit.
func NewSignalRebooter(sig int) (*SignalRebooter, error) {
if sig < 1 {
return nil, fmt.Errorf("invalid signal: %v", sig)
}
return &SignalRebooter{Signal: sig}, nil
}
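A usage sketch; the numeric base is an assumption, since Go does not export SIGRTMIN and its value (34 on common Linux/glibc targets) is platform-dependent:

const sigRTMin = 34 // assumption: glibc's SIGRTMIN on Linux

// systemd treats SIGRTMIN+5 on PID 1 as a request to reboot.
rebooter, err := reboot.NewSignalRebooter(sigRTMin + 5)
if err != nil {
    panic(err) // illustration only
}
if err := rebooter.Reboot(); err != nil {
    panic(err)
}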

View File

@@ -1,6 +1,3 @@
// Package taints provides utilities to manage Kubernetes node taints for controlling
// pod scheduling and execution. It allows setting, removing, and checking taints on nodes,
// using Kubernetes client-go and JSON patching for atomic updates.
package taints
import (

View File

@@ -50,7 +50,7 @@ func parseWeekdays(days []string) (weekdays, error) {
if err != nil {
return weekdays(0), err
}
// #nosec G115 -- weekday is guaranteed to be between 0 and 6 by parseWeekday()
result |= 1 << uint32(weekday)
}
@@ -59,7 +59,6 @@ func parseWeekdays(days []string) (weekdays, error) {
// Contains returns true if the specified weekday is a member of this set.
func (w weekdays) Contains(day time.Weekday) bool {
// #nosec G115 -- day is time.Weekday [0-6], shift safe within uint32
return uint32(w)&(1<<uint32(day)) != 0
}
@@ -82,11 +81,11 @@ func parseWeekday(day string) (time.Weekday, error) {
if n >= 0 && n < 7 {
return time.Weekday(n), nil
}
return time.Sunday, fmt.Errorf("invalid weekday, number out of range: %s", day)
return time.Sunday, fmt.Errorf("Invalid weekday, number out of range: %s", day)
}
if weekday, ok := dayStrings[strings.ToLower(day)]; ok {
return weekday, nil
}
return time.Sunday, fmt.Errorf("invalid weekday: %s", day)
return time.Sunday, fmt.Errorf("Invalid weekday: %s", day)
}
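An in-package illustration of the bitmask (imports elided; assumes the dayStrings table accepts short names such as "mon"):

// {"mon", "wed"} sets bits 1<<1 and 1<<3, so the underlying uint32 is 10.
w, err := parseWeekdays([]string{"mon", "wed"})
if err != nil {
    panic(err)
}
fmt.Println(w.Contains(time.Monday)) // true
fmt.Println(w.Contains(time.Friday)) // false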

View File

@@ -1,7 +1,3 @@
// Package timewindow provides utilities for handling days of the week,
// including parsing, representing, and manipulating sets of weekdays.
// It enables flexible specification of time windows for reboot operations
// in kured, supporting various formats and convenience functions.
package timewindow
import (
@@ -81,5 +77,5 @@ func parseTime(s string, loc *time.Location) (time.Time, error) {
}
}
return time.Now(), fmt.Errorf("invalid time format: %s", s)
return time.Now(), fmt.Errorf("Invalid time format: %s", s)
}

View File

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
# USE KUBECTL_CMD to pass context and/or namespaces.
KUBECTL_CMD="${KUBECTL_CMD:-kubectl}"
SENTINEL_FILE="${SENTINEL_FILE:-/var/run/reboot-required}"
echo "Creating reboot sentinel on all nodes"
for nodename in $("$KUBECTL_CMD" get nodes -o name); do
docker exec "${nodename/node\//}" hostname
docker exec "${nodename/node\//}" touch "${SENTINEL_FILE}"
done

View File

@@ -1,14 +1,11 @@
#!/usr/bin/env bash
REBOOTCOUNT=${REBOOTCOUNT:-2} # By default we only create two sentinels in create-reboot-sentinels.
NODECOUNT=${NODECOUNT:-5}
KUBECTL_CMD="${KUBECTL_CMD:-kubectl}"
DEBUG="${DEBUG:-false}"
CONTAINER_NAME_FORMAT=${CONTAINER_NAME_FORMAT:-"chart-testing-*"}
kubectl_flags=( )
[[ "$1" != "" ]] && kubectl_flags=("${kubectl_flags[@]}" --context "$1")
tmp_dir=$(mktemp -d -t kured-XXXX)
function gather_logs_and_cleanup {
if [[ -f "$tmp_dir"/node_output ]]; then
rm "$tmp_dir"/node_output
@@ -21,15 +18,15 @@ function gather_logs_and_cleanup {
# This is useful to see if containers have crashed.
echo "docker ps -a:"
docker ps -a
echo "docker journal logs"
journalctl -u docker --no-pager
echo "docker journal logs"
journalctl -u docker --no-pager
# This is useful to see if the nodes have _properly_ rebooted.
# It should show the reboot/two container starts per node.
for id in $(docker ps -a -q); do
for name in $(docker ps -a -f "name=${CONTAINER_NAME_FORMAT}" -q); do
echo "############################################################"
echo "docker logs for container $id:"
docker logs "$id"
echo "docker logs for container $name:"
docker logs "$name"
done
fi
@@ -38,28 +35,24 @@ trap gather_logs_and_cleanup EXIT
declare -A was_unschedulable
declare -A has_recovered
max_attempts="200"
sleep_time=5
max_attempts="60"
sleep_time=60
attempt_num=1
# Get docker info of each of those kind containers. If one has crashed, restart it.
set +o errexit
echo "There are $REBOOTCOUNT nodes total needing reboot in the cluster"
until [ ${#was_unschedulable[@]} == "$REBOOTCOUNT" ] && [ ${#has_recovered[@]} == "$REBOOTCOUNT" ]
echo "There are $NODECOUNT nodes in the cluster"
until [ ${#was_unschedulable[@]} == "$NODECOUNT" ] && [ ${#has_recovered[@]} == "$NODECOUNT" ]
do
echo "${#was_unschedulable[@]} nodes were removed from pool once:" "${!was_unschedulable[@]}"
echo "${#has_recovered[@]} nodes removed from the pool are now back:" "${!has_recovered[@]}"
${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get nodes -o custom-columns=NAME:.metadata.name,SCHEDULABLE:.spec.unschedulable --no-headers | grep -v control-plane > "$tmp_dir"/node_output
"$KUBECTL_CMD" get nodes -o custom-columns=NAME:.metadata.name,SCHEDULABLE:.spec.unschedulable --no-headers > "$tmp_dir"/node_output
if [[ "$DEBUG" == "true" ]]; then
# This is useful to see if a node gets stuck after drain, and doesn't
# come back up.
echo "Result of command kubectl unschedulable nodes:"
echo "Result of command $KUBECTL_CMD get nodes ... showing unschedulable nodes:"
cat "$tmp_dir"/node_output
fi
while read -r node; do
unschedulable=$(echo "$node" | grep true | cut -f 1 -d ' ')
if [ -n "$unschedulable" ] && [ -z ${was_unschedulable["$unschedulable"]+x} ] ; then
@@ -71,15 +64,9 @@ do
echo "$schedulable has recovered!"
has_recovered["$schedulable"]=1
fi
# If the container has crashed, restart it.
node_name=$(echo "$node" | cut -f 1 -d ' ')
stopped_container_id=$(docker container ls --filter=name="$node_name" --filter=status=exited -q)
if [ -n "$stopped_container_id" ]; then echo "Node $stopped_container_id needs restart"; docker start "$stopped_container_id"; echo "Container started."; fi
done < "$tmp_dir"/node_output
if [[ "${#has_recovered[@]}" == "$REBOOTCOUNT" ]]; then
if [[ "${#has_recovered[@]}" == "$NODECOUNT" ]]; then
echo "All nodes recovered."
break
else

View File

@@ -1,429 +0,0 @@
package kind
import (
"bytes"
"fmt"
"math/rand"
"os/exec"
"strconv"
"testing"
"time"
)
const (
kuredDevImage string = "kured:dev"
)
// KindTest is a test cluster deployed by each test function, prepared to run a given test scenario.
type KindTest struct {
kindConfigPath string
clusterName string
timeout time.Duration
deployManifests []string
localImages []string
logsDir string
logBuffer bytes.Buffer
testInstance *testing.T // Maybe move this to testing.TB
}
func (k *KindTest) Write(p []byte) (n int, err error) {
k.testInstance.Helper()
k.logBuffer.Write(p)
return len(p), nil
}
func (k *KindTest) FlushLog() {
k.testInstance.Helper()
k.testInstance.Log(k.logBuffer.String())
k.logBuffer.Reset()
}
func (k *KindTest) RunCmd(cmdDetails ...string) error {
cmd := exec.Command(cmdDetails[0], cmdDetails[1:]...)
// By making KindTest a Writer, we can simply wire k to the command's output;
// anything written to k ends up in the proper test logs.
cmd.Stdout = k
cmd.Stderr = k
err := cmd.Run()
if err != nil {
return err
}
return nil
}
// Option that can be passed to the NewKind function in order to change the configuration
// of the test cluster
type Option func(k *KindTest)
// Deploy can be passed to NewKindTester to deploy extra components, in addition to the base deployment.
func Deploy(manifest string) Option {
return func(k *KindTest) {
k.deployManifests = append(k.deployManifests, manifest)
}
}
// ExportLogs can be passed to NewKindTester to specify the folder where the kubernetes logs will be exported after the tests.
func ExportLogs(folder string) Option {
return func(k *KindTest) {
k.logsDir = folder
}
}
// Timeout sets the timeout for long-running operations (e.g. deployments, readiness probes...)
func Timeout(t time.Duration) Option {
return func(k *KindTest) {
k.timeout = t
}
}
// LocalImage is passed to NewKindTester to allow loading a local Docker image into the cluster
func LocalImage(nameTag string) Option {
return func(k *KindTest) {
k.localImages = append(k.localImages, nameTag)
}
}
// NewKind creates a kind cluster given a name and set of Option instances.
func NewKindTester(kindClusterName string, filePath string, t *testing.T, options ...Option) *KindTest {
k := &KindTest{
clusterName: kindClusterName,
timeout: 10 * time.Minute,
kindConfigPath: filePath,
testInstance: t,
}
for _, option := range options {
option(k)
}
return k
}
// Create prepares the kind cluster.
func (k *KindTest) Create() error {
err := k.RunCmd("kind", "create", "cluster", "--name", k.clusterName, "--config", k.kindConfigPath)
if err != nil {
return fmt.Errorf("failed to create cluster: %v", err)
}
for _, img := range k.localImages {
if err := k.RunCmd("kind", "load", "docker-image", "--name", k.clusterName, img); err != nil {
return fmt.Errorf("failed to load image: %v", err)
}
}
for _, mf := range k.deployManifests {
kubectlContext := fmt.Sprintf("kind-%v", k.clusterName)
if err := k.RunCmd("kubectl", "--context", kubectlContext, "apply", "-f", mf); err != nil {
return fmt.Errorf("failed to deploy manifest: %v", err)
}
}
return nil
}
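// Destroy tears down the kind cluster, exporting its logs first when a logs dir was configured.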
func (k *KindTest) Destroy() error {
if k.logsDir != "" {
if err := k.RunCmd("kind", "export", "logs", k.logsDir, "--name", k.clusterName); err != nil {
return fmt.Errorf("failed to export logs: %v. will not teardown", err)
}
}
if err := k.RunCmd("kind", "delete", "cluster", "--name", k.clusterName); err != nil {
return fmt.Errorf("failed to destroy cluster: %v", err)
}
return nil
}
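The functional options compose as follows; a hedged sketch inside a test, with placeholder file names:

k := NewKindTester("kured-e2e-demo", "../../.github/kind-cluster-current.yaml", t,
    LocalImage("kured:dev"),
    Deploy("../../kured-rbac.yaml"),
    ExportLogs(t.TempDir()), // hypothetical log destination
    Timeout(15*time.Minute),
)
if err := k.Create(); err != nil {
    t.Fatalf("Error creating cluster %v", err)
}
defer k.Destroy() // error ignored in this sketch

ExportLogs and Timeout are not exercised by the tests below, but this is how they would slot in.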
func TestE2EWithCommand(t *testing.T) {
t.Parallel()
if testing.Short() {
t.Skip("skipping test in short mode.")
}
var kindClusterConfigs = []string{
"previous",
"current",
"next",
}
// Iterate over each Kubernetes version
for _, version := range kindClusterConfigs {
version := version
// Define a subtest for each combination
t.Run(version, func(t *testing.T) {
t.Parallel() // Allow tests to run in parallel
randomInt := strconv.Itoa(rand.Intn(100))
kindClusterName := fmt.Sprintf("kured-e2e-command-%v-%v", version, randomInt)
kindClusterConfigFile := fmt.Sprintf("../../.github/kind-cluster-%v.yaml", version)
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy("testfiles/kured-ds.yaml"))
defer k.FlushLog()
err := k.Create()
if err != nil {
t.Fatalf("Error creating cluster %v", err)
}
defer func(k *KindTest) {
err := k.Destroy()
if err != nil {
t.Fatalf("Error destroying cluster %v", err)
}
}(k)
k.Write([]byte("Now running e2e tests"))
if err := k.RunCmd("bash", "testfiles/create-reboot-sentinels.sh", kindContext); err != nil {
t.Fatalf("failed to create sentinels: %v", err)
}
if err := k.RunCmd("bash", "testfiles/follow-coordinated-reboot.sh", kindContext); err != nil {
t.Fatalf("failed to follow reboot: %v", err)
}
})
}
}
func TestE2EWithSignal(t *testing.T) {
t.Parallel()
if testing.Short() {
t.Skip("skipping test in short mode.")
}
var kindClusterConfigs = []string{
"previous",
"current",
"next",
}
// Iterate over each Kubernetes version
for _, version := range kindClusterConfigs {
version := version
// Define a subtest for each combination
t.Run(version, func(t *testing.T) {
t.Parallel() // Allow tests to run in parallel
randomInt := strconv.Itoa(rand.Intn(100))
kindClusterName := fmt.Sprintf("kured-e2e-signal-%v-%v", version, randomInt)
kindClusterConfigFile := fmt.Sprintf("../../.github/kind-cluster-%v.yaml", version)
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy("testfiles/kured-ds-signal.yaml"))
defer k.FlushLog()
err := k.Create()
if err != nil {
t.Fatalf("Error creating cluster %v", err)
}
defer func(k *KindTest) {
err := k.Destroy()
if err != nil {
t.Fatalf("Error destroying cluster %v", err)
}
}(k)
k.Write([]byte("Now running e2e tests"))
if err := k.RunCmd("bash", "testfiles/create-reboot-sentinels.sh", kindContext); err != nil {
t.Fatalf("failed to create sentinels: %v", err)
}
if err := k.RunCmd("bash", "testfiles/follow-coordinated-reboot.sh", kindContext); err != nil {
t.Fatalf("failed to follow reboot: %v", err)
}
})
}
}
func TestE2EConcurrentWithCommand(t *testing.T) {
t.Parallel()
if testing.Short() {
t.Skip("skipping test in short mode.")
}
var kindClusterConfigs = []string{
"previous",
"current",
"next",
}
// Iterate over each Kubernetes version
for _, version := range kindClusterConfigs {
version := version
// Define a subtest for each combination
t.Run(version, func(t *testing.T) {
t.Parallel() // Allow tests to run in parallel
randomInt := strconv.Itoa(rand.Intn(100))
kindClusterName := fmt.Sprintf("kured-e2e-concurrentcommand-%v-%v", version, randomInt)
kindClusterConfigFile := fmt.Sprintf("../../.github/kind-cluster-%v.yaml", version)
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy("testfiles/kured-ds-concurrent-command.yaml"))
defer k.FlushLog()
err := k.Create()
if err != nil {
t.Fatalf("Error creating cluster %v", err)
}
defer func(k *KindTest) {
err := k.Destroy()
if err != nil {
t.Fatalf("Error destroying cluster %v", err)
}
}(k)
k.Write([]byte("Now running e2e tests"))
if err := k.RunCmd("bash", "testfiles/create-reboot-sentinels.sh", kindContext); err != nil {
t.Fatalf("failed to create sentinels: %v", err)
}
if err := k.RunCmd("bash", "testfiles/follow-coordinated-reboot.sh", kindContext); err != nil {
t.Fatalf("failed to follow reboot: %v", err)
}
})
}
}
func TestE2EConcurrentWithSignal(t *testing.T) {
t.Parallel()
if testing.Short() {
t.Skip("skipping test in short mode.")
}
var kindClusterConfigs = []string{
"previous",
"current",
"next",
}
// Iterate over each Kubernetes version
for _, version := range kindClusterConfigs {
version := version
// Define a subtest for each combination
t.Run(version, func(t *testing.T) {
t.Parallel() // Allow tests to run in parallel
randomInt := strconv.Itoa(rand.Intn(100))
kindClusterName := fmt.Sprintf("kured-e2e-concurrentsignal-%v-%v", version, randomInt)
kindClusterConfigFile := fmt.Sprintf("../../.github/kind-cluster-%v.yaml", version)
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy("testfiles/kured-ds-concurrent-signal.yaml"))
defer k.FlushLog()
err := k.Create()
if err != nil {
t.Fatalf("Error creating cluster %v", err)
}
defer func(k *KindTest) {
err := k.Destroy()
if err != nil {
t.Fatalf("Error destroying cluster %v", err)
}
}(k)
k.Write([]byte("Now running e2e tests"))
if err := k.RunCmd("bash", "testfiles/create-reboot-sentinels.sh", kindContext); err != nil {
t.Fatalf("failed to create sentinels: %v", err)
}
if err := k.RunCmd("bash", "testfiles/follow-coordinated-reboot.sh", kindContext); err != nil {
t.Fatalf("failed to follow reboot: %v", err)
}
})
}
}
func TestCordonningIsKept(t *testing.T) {
t.Parallel()
if testing.Short() {
t.Skip("skipping test in short mode.")
}
var kindClusterConfigs = []string{
"concurrency1",
"concurrency2",
}
// Iterate over each test variant
for _, variant := range kindClusterConfigs {
variant := variant
// Define a subtest for each combination
t.Run(variant, func(t *testing.T) {
t.Parallel() // Allow tests to run in parallel
randomInt := strconv.Itoa(rand.Intn(100))
kindClusterName := fmt.Sprintf("kured-e2e-cordon-%v-%v", variant, randomInt)
kindClusterConfigFile := "../../.github/kind-cluster-next.yaml"
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
var manifest string
if variant == "concurrency1" {
manifest = "testfiles/kured-ds-signal.yaml"
} else {
manifest = "testfiles/kured-ds-concurrent-signal.yaml"
}
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy(manifest))
defer k.FlushLog()
err := k.Create()
if err != nil {
t.Fatalf("Error creating cluster %v", err)
}
defer func(k *KindTest) {
err := k.Destroy()
if err != nil {
t.Fatalf("Error destroying cluster %v", err)
}
}(k)
k.Write([]byte("Now running e2e tests"))
if err := k.RunCmd("bash", "testfiles/node-stays-as-cordonned.sh", kindContext); err != nil {
t.Fatalf("node did not reboot in time: %v", err)
}
})
}
}
func TestE2EBlocker(t *testing.T) {
t.Parallel()
if testing.Short() {
t.Skip("skipping test in short mode.")
}
var kindClusterConfigs = []string{
"podblocker",
}
// Iterate over each variant of the test
for _, variant := range kindClusterConfigs {
variant := variant
// Define a subtest for each combination
t.Run(variant, func(t *testing.T) {
t.Parallel() // Allow tests to run in parallel
randomInt := strconv.Itoa(rand.Intn(100))
kindClusterName := fmt.Sprintf("kured-e2e-cordon-%v-%v", variant, randomInt)
kindClusterConfigFile := "../../.github/kind-cluster-next.yaml"
kindContext := fmt.Sprintf("kind-%v", kindClusterName)
k := NewKindTester(kindClusterName, kindClusterConfigFile, t, LocalImage(kuredDevImage), Deploy("../../kured-rbac.yaml"), Deploy(fmt.Sprintf("testfiles/kured-ds-%v.yaml", variant)))
defer k.FlushLog()
err := k.Create()
if err != nil {
t.Fatalf("Error creating cluster %v", err)
}
defer func(k *KindTest) {
err := k.Destroy()
if err != nil {
t.Fatalf("Error destroying cluster %v", err)
}
}(k)
k.Write([]byte("Now running e2e tests"))
if err := k.RunCmd("bash", fmt.Sprintf("testfiles/%v.sh", variant), kindContext); err != nil {
t.Fatalf("node blocker test did not succeed: %v", err)
}
})
}
}

View File

@@ -1,11 +0,0 @@
#!/usr/bin/env bash
kubectl_flags=( )
[[ "$1" != "" ]] && kubectl_flags=("${kubectl_flags[@]}" --context "$1")
# To speed up the test, let's not kill the control plane.
for nodename in $(${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get nodes -o name | grep -v control-plane); do
echo "Creating reboot sentinel on $nodename"
docker exec "${nodename/node\//}" hostname
docker exec "${nodename/node\//}" touch "${SENTINEL_FILE:-/var/run/reboot-required}"
done

View File

@@ -1,59 +0,0 @@
#!/usr/bin/env bash
kubectl_flags=( )
[[ "$1" != "" ]] && kubectl_flags=("${kubectl_flags[@]}" --context "$1")
cordon() {
kubectl "${kubectl_flags[@]}" cordon "${precordonned_node}"
}
create_sentinel() {
docker exec "${precordonned_node}" touch "${SENTINEL_FILE:-/var/run/reboot-required}"
docker exec "${notcordonned_node}" touch "${SENTINEL_FILE:-/var/run/reboot-required}"
}
check_reboot_required() {
while true;
do
docker exec "${precordonned_node}" stat /var/run/reboot-required > /dev/null && echo "Reboot still required" || return 0
sleep 3
done
}
check_node_back_online_as_cordonned() {
sleep 5 # For safety, wait for 5 seconds, so that the kubectl command succeeds.
# This test might give us false positives until we improve its
# reliability.
while true;
do
result=$(kubectl "${kubectl_flags[@]}" get node "${precordonned_node}" --no-headers | awk '{print $2;}')
test "${result}" != "Ready,SchedulingDisabled" && echo "Node ${precordonned_node} in state ${result}" || return 0
sleep 3
done
}
check_node_back_online_as_uncordonned() {
while true;
do
result=$(kubectl "${kubectl_flags[@]}" get node "${notcordonned_node}" --no-headers | awk '{print $2;}')
test "${result}" != "Ready" && echo "Node ${notcordonned_node} in state ${result}" || return 0
sleep 3
done
}
### Start main
worker_nodes=$(${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get nodes -o custom-columns=name:metadata.name --no-headers | grep worker)
precordonned_node=$(echo "$worker_nodes" | head -n 1)
notcordonned_node=$(echo "$worker_nodes" | tail -n 1)
# Wait for kured to install correctly
sleep 15
cordon
create_sentinel
check_reboot_required
echo "Node has rebooted, but may take time to come back ready"
check_node_back_online_as_cordonned
check_node_back_online_as_uncordonned
echo "Showing final node state"
${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get nodes
echo "Test successful"

View File

@@ -1,54 +0,0 @@
#!/usr/bin/env bash
kubectl_flags=( )
[[ "$1" != "" ]] && kubectl_flags=("${kubectl_flags[@]}" --context "$1")
function gather_logs_and_cleanup {
for id in $(docker ps -q); do
echo "############################################################"
echo "docker logs for container $id:"
docker logs "$id"
done
${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" logs ds/kured --all-pods -n kube-system
}
trap gather_logs_and_cleanup EXIT
set +o errexit
worker=$(${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" get nodes -o custom-columns=name:metadata.name --no-headers | grep worker | head -n 1)
${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" label nodes "$worker" blocked-host=yes
${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
name: nginx
labels:
app: blocker
spec:
containers:
- name: nginx
image: nginx
imagePullPolicy: IfNotPresent
nodeSelector:
blocked-host: "yes"
EOF
docker exec "$worker" touch "${SENTINEL_FILE:-/var/run/reboot-required}"
set -o errexit
max_attempts="100"
attempt_num=1
sleep_time=5
until ${KUBECTL_CMD:-kubectl} "${kubectl_flags[@]}" logs ds/kured --all-pods -n kube-system | grep -i -e "Reboot.*blocked"
do
if (( attempt_num == max_attempts )); then
echo "Attempt $attempt_num failed and there are no more attempts left!"
exit 1
else
echo "Did not find 'reboot blocked' in the log, retrying in $sleep_time seconds (Attempt #$attempt_num)"
sleep "$sleep_time"
fi
(( attempt_num++ ))
done