mirror of
https://github.com/kubereboot/kured.git
synced 2026-04-27 20:16:39 +00:00
Compare commits
48 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ca56c02e2e | ||
|
|
c0092171f4 | ||
|
|
9f0583ba71 | ||
|
|
c3cca29970 | ||
|
|
08ba855e41 | ||
|
|
c5d21d4e03 | ||
|
|
f0815759b2 | ||
|
|
8b983e2507 | ||
|
|
090b33b726 | ||
|
|
1bc4d46483 | ||
|
|
14636ee333 | ||
|
|
aba74cb73b | ||
|
|
40c99fbd76 | ||
|
|
18e4566504 | ||
|
|
55c66a4751 | ||
|
|
fdcc8438ac | ||
|
|
09c6ac3a13 | ||
|
|
bd5fd3312a | ||
|
|
20f61e3a13 | ||
|
|
dc746f5f88 | ||
|
|
ec79ea66d9 | ||
|
|
72913ee233 | ||
|
|
4b1506e15d | ||
|
|
6f7abae29b | ||
|
|
9db0ef7a38 | ||
|
|
f13943b929 | ||
|
|
db4510d21a | ||
|
|
cccf89601c | ||
|
|
606cc3b935 | ||
|
|
491b55acb1 | ||
|
|
091028f331 | ||
|
|
df0d58e3ae | ||
|
|
54dfa59722 | ||
|
|
5fae235d6a | ||
|
|
20bc76497d | ||
|
|
ed9e8f2b35 | ||
|
|
a59b47e75f | ||
|
|
feaf366ac0 | ||
|
|
972bab5e60 | ||
|
|
2575ab4bed | ||
|
|
250e1f0f58 | ||
|
|
554cf53b7b | ||
|
|
a415ae856f | ||
|
|
622c1c6082 | ||
|
|
8ed3e7991d | ||
|
|
18e1a4537d | ||
|
|
c4287dc22b | ||
|
|
bc43dacf4a |
@@ -1,3 +0,0 @@
|
||||
exemptions:
|
||||
- check: analytics
|
||||
reason: "We don't track people"
|
||||
21
.github/dependabot.yml
vendored
21
.github/dependabot.yml
vendored
@@ -1,21 +0,0 @@
|
||||
version: 2
|
||||
updates:
|
||||
# Maintain dependencies for GitHub Actions
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
# Maintain dependencies for gomod
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
ignore:
|
||||
- dependency-name: "k8s.io/api"
|
||||
- dependency-name: "k8s.io/apimachinery"
|
||||
- dependency-name: "k8s.io/client-go"
|
||||
- dependency-name: "k8s.io/kubectl"
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
13
.github/kind-cluster-1.24.yaml
vendored
13
.github/kind-cluster-1.24.yaml
vendored
@@ -1,13 +0,0 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.24.7"
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.24.7"
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.24.7"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.24.7"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.24.7"
|
||||
13
.github/kind-cluster-1.25.yaml
vendored
13
.github/kind-cluster-1.25.yaml
vendored
@@ -1,13 +0,0 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
image: kindest/node:v1.25.3
|
||||
- role: control-plane
|
||||
image: kindest/node:v1.25.3
|
||||
- role: control-plane
|
||||
image: kindest/node:v1.25.3
|
||||
- role: worker
|
||||
image: kindest/node:v1.25.3
|
||||
- role: worker
|
||||
image: kindest/node:v1.25.3
|
||||
13
.github/kind-cluster-1.26.yaml
vendored
13
.github/kind-cluster-1.26.yaml
vendored
@@ -1,13 +0,0 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.26.0"
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.26.0"
|
||||
- role: control-plane
|
||||
image: "kindest/node:v1.26.0"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.26.0"
|
||||
- role: worker
|
||||
image: "kindest/node:v1.26.0"
|
||||
37
.github/scripts/goreleaser-install.sh
vendored
37
.github/scripts/goreleaser-install.sh
vendored
@@ -1,37 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
RELEASES_URL="https://github.com/goreleaser/goreleaser/releases"
|
||||
FILE_BASENAME="goreleaser"
|
||||
|
||||
test -z "$VERSION" && {
|
||||
echo "Unable to get goreleaser version." >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
test -z "$TMPDIR" && TMPDIR="$(mktemp -d)"
|
||||
TAR_FILE="$TMPDIR/${FILE_BASENAME}_$(uname -s)_$(uname -m).tar.gz"
|
||||
export TAR_FILE
|
||||
|
||||
(
|
||||
echo "Downloading GoReleaser $VERSION..."
|
||||
curl -sfLo "$TAR_FILE" \
|
||||
"$RELEASES_URL/download/$VERSION/${FILE_BASENAME}_$(uname -s)_$(uname -m).tar.gz"
|
||||
cd "$TMPDIR"
|
||||
curl -sfLo "checksums.txt" "$RELEASES_URL/download/$VERSION/checksums.txt"
|
||||
curl -sfLo "checksums.txt.sig" "$RELEASES_URL/download/$VERSION/checksums.txt.sig"
|
||||
echo "Verifying checksums..."
|
||||
sha256sum --ignore-missing --quiet --check checksums.txt
|
||||
if command -v cosign >/dev/null 2>&1; then
|
||||
echo "Verifying signatures..."
|
||||
COSIGN_EXPERIMENTAL=1 cosign verify-blob \
|
||||
--signature checksums.txt.sig \
|
||||
checksums.txt
|
||||
else
|
||||
echo "Could not verify signatures, cosign is not installed."
|
||||
fi
|
||||
)
|
||||
|
||||
tar -xf "$TAR_FILE" -O goreleaser > "$TMPDIR/goreleaser"
|
||||
rm "$TMPDIR/checksums.txt" "$TMPDIR/checksums.txt.sig"
|
||||
rm "$TAR_FILE"
|
||||
75
.github/workflows/codeql.yml
vendored
75
.github/workflows/codeql.yml
vendored
@@ -1,75 +0,0 @@
|
||||
# For most projects, this workflow file will not need changing; you simply need
|
||||
# to commit it to your repository.
|
||||
#
|
||||
# You may wish to alter this file to override the set of languages analyzed,
|
||||
# or to provide custom queries or build logic.
|
||||
#
|
||||
# ******** NOTE ********
|
||||
# We have attempted to detect the languages in your repository. Please check
|
||||
# the `language` matrix defined below to confirm you have the correct set of
|
||||
# supported CodeQL languages.
|
||||
#
|
||||
name: "CodeQL"
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
pull_request:
|
||||
# The branches below must be a subset of the branches above
|
||||
branches: [ "main" ]
|
||||
schedule:
|
||||
- cron: '24 13 * * 3'
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
actions: read
|
||||
contents: read
|
||||
security-events: write
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
language: [ 'go' ]
|
||||
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
|
||||
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v2
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||
# By default, queries listed here will override any specified in a config file.
|
||||
# Prefix the list here with "+" to use these queries and those in the config file.
|
||||
|
||||
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
|
||||
# queries: security-extended,security-and-quality
|
||||
|
||||
|
||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||
# If this step fails, then you should remove it and run the build manually (see below)
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v2
|
||||
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
|
||||
|
||||
# If the Autobuild fails above, remove it and uncomment the following three lines.
|
||||
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
|
||||
|
||||
# - run: |
|
||||
# echo "Run, Build Application using script"
|
||||
# ./location_of_script_within_repo/buildscript.sh
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v2
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
85
.github/workflows/on-main-push.yaml
vendored
85
.github/workflows/on-main-push.yaml
vendored
@@ -1,85 +0,0 @@
|
||||
# We publish every merged commit in the form of an image
|
||||
# named kured:<branch>-<short tag>
|
||||
name: Push image of latest main
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
jobs:
|
||||
tag-scan-and-push-final-image:
|
||||
name: "Build, scan, and publish tagged image"
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: write
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Find current tag version
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
|
||||
- name: Build binaries
|
||||
run: make kured-release-snapshot
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COSIGN_EXPERIMENTAL: 1
|
||||
|
||||
- name: Build image
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/arm64, linux/amd64, linux/arm/v7, linux/arm/v6, linux/386
|
||||
push: true
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
tags: |
|
||||
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
|
||||
|
||||
- name: Generate SBOM
|
||||
run: |
|
||||
.tmp/syft ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }} -o spdx > kured.sbom
|
||||
|
||||
- name: Sign and attest artifacts
|
||||
run: |
|
||||
.tmp/cosign sign -f -r ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
|
||||
|
||||
.tmp/cosign sign-blob --output-signature kured.sbom.sig --output-certificate kured.sbom.pem kured.sbom
|
||||
|
||||
.tmp/cosign attest -f --type spdx --predicate kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
|
||||
.tmp/cosign attach sbom --type spdx --sbom kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.sha_short }}
|
||||
env:
|
||||
COSIGN_EXPERIMENTAL: 1
|
||||
178
.github/workflows/on-pr.yaml
vendored
178
.github/workflows/on-pr.yaml
vendored
@@ -1,178 +0,0 @@
|
||||
name: PR
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
|
||||
jobs:
|
||||
pr-gotest:
|
||||
name: Run go tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v3
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
- name: run tests
|
||||
run: go test -json ./... > test.json
|
||||
- name: Annotate tests
|
||||
if: always()
|
||||
uses: guyarb/golang-test-annoations@v0.6.0
|
||||
with:
|
||||
test-results: test.json
|
||||
|
||||
pr-shellcheck:
|
||||
name: Lint bash code with shellcheck
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Run ShellCheck
|
||||
uses: bewuethr/shellcheck-action@v2
|
||||
|
||||
pr-lint-code:
|
||||
name: Lint golang code
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
- name: Lint cmd folder
|
||||
uses: Jerome1337/golint-action@v1.0.3
|
||||
with:
|
||||
golint-path: './cmd/...'
|
||||
- name: Lint pkg folder
|
||||
uses: Jerome1337/golint-action@v1.0.3
|
||||
with:
|
||||
golint-path: './pkg/...'
|
||||
|
||||
pr-check-docs-links:
|
||||
name: Check docs for incorrect links
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Link Checker
|
||||
uses: lycheeverse/lychee-action@4dcb8bee2a0a4531cba1a1f392c54e8375d6dd81
|
||||
env:
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||
with:
|
||||
args: --verbose --no-progress '*.md' '*.yaml' '*/*/*.go' --exclude-link-local
|
||||
fail: true
|
||||
|
||||
# This should not be made a mandatory test
|
||||
# It is only used to make us aware of any potential security failure, that
|
||||
# should trigger a bump of the image in build/.
|
||||
pr-vuln-scan:
|
||||
name: Build image and scan it against known vulnerabilities
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
- name: Find current tag version
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
- name: Build image
|
||||
run: VERSION="${{ steps.tags.outputs.sha_short }}" make image
|
||||
- uses: Azure/container-scan@v0
|
||||
env:
|
||||
# See https://github.com/goodwithtech/dockle/issues/188
|
||||
DOCKLE_HOST: "unix:///var/run/docker.sock"
|
||||
with:
|
||||
image-name: ghcr.io/${{ github.repository }}:${{ steps.tags.outputs.sha_short }}
|
||||
|
||||
# This ensures the latest code works with the manifests built from tree.
|
||||
# It is useful for two things:
|
||||
# - Test manifests changes (obviously), ensuring they don't break existing clusters
|
||||
# - Ensure manifests work with the latest versions even with no manifest change
|
||||
# (compared to helm charts, manifests cannot easily template changes based on versions)
|
||||
# Helm charts are _trailing_ releases, while manifests are done during development.
|
||||
e2e-manifests:
|
||||
name: End-to-End test with kured with code and manifests from HEAD
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
kubernetes:
|
||||
- "1.24"
|
||||
- "1.25"
|
||||
- "1.26"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
- name: Find current tag version
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
- name: Build artifacts
|
||||
run: |
|
||||
VERSION="${{ steps.tags.outputs.sha_short }}" make image
|
||||
VERSION="${{ steps.tags.outputs.sha_short }}" make manifest
|
||||
|
||||
- name: Workaround "Failed to attach 1 to compat systemd cgroup /actions_job/..." on gh actions
|
||||
run: |
|
||||
sudo bash << EOF
|
||||
cp /etc/docker/daemon.json /etc/docker/daemon.json.old
|
||||
echo '{}' > /etc/docker/daemon.json
|
||||
systemctl restart docker || journalctl --no-pager -n 500
|
||||
systemctl status docker
|
||||
EOF
|
||||
|
||||
# Default name for helm/kind-action kind clusters is "chart-testing"
|
||||
- name: Create kind cluster with 5 nodes
|
||||
uses: helm/kind-action@v1.5.0
|
||||
with:
|
||||
config: .github/kind-cluster-${{ matrix.kubernetes }}.yaml
|
||||
version: v0.14.0
|
||||
|
||||
- name: Preload previously built images onto kind cluster
|
||||
run: kind load docker-image ghcr.io/${{ github.repository }}:${{ steps.tags.outputs.sha_short }} --name chart-testing
|
||||
|
||||
- name: Do not wait for an hour before detecting the rebootSentinel
|
||||
run: |
|
||||
sed -i 's/#\(.*\)--period=1h/\1--period=30s/g' kured-ds.yaml
|
||||
|
||||
- name: Install kured with kubectl
|
||||
run: |
|
||||
kubectl apply -f kured-rbac.yaml && kubectl apply -f kured-ds.yaml
|
||||
|
||||
- name: Ensure kured is ready
|
||||
uses: nick-invision/retry@v2.8.3
|
||||
with:
|
||||
timeout_minutes: 10
|
||||
max_attempts: 10
|
||||
retry_wait_seconds: 60
|
||||
# DESIRED CURRENT READY UP-TO-DATE AVAILABLE should all be = to cluster_size
|
||||
command: "kubectl get ds -n kube-system kured | grep -E 'kured.*5.*5.*5.*5.*5'"
|
||||
|
||||
- name: Create reboot sentinel files
|
||||
run: |
|
||||
./tests/kind/create-reboot-sentinels.sh
|
||||
|
||||
- name: Follow reboot until success
|
||||
env:
|
||||
DEBUG: true
|
||||
run: |
|
||||
./tests/kind/follow-coordinated-reboot.sh
|
||||
97
.github/workflows/on-tag.yaml
vendored
97
.github/workflows/on-tag.yaml
vendored
@@ -1,97 +0,0 @@
|
||||
# when we add a tag to the repo, we should publish the kured image to a public repository
|
||||
# if it's safe.
|
||||
# It doesn't mean it's ready for release, but at least it's getting us started.
|
||||
# The next step is to have a PR with the helm chart, to bump the version of the image used
|
||||
name: Tag repo
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "*"
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
jobs:
|
||||
tag-scan-and-push-final-image:
|
||||
name: "Build, scan, and publish tagged image"
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: write
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
- name: Find current tag version
|
||||
run: echo "version=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
- name: Build binaries
|
||||
run: make kured-release-tag
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COSIGN_EXPERIMENTAL: 1
|
||||
- name: Build single image for scan
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/amd64
|
||||
push: false
|
||||
load: true
|
||||
tags: |
|
||||
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
|
||||
- uses: Azure/container-scan@v0
|
||||
env:
|
||||
# See https://github.com/goodwithtech/dockle/issues/188
|
||||
DOCKLE_HOST: "unix:///var/run/docker.sock"
|
||||
with:
|
||||
image-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@507c2f2dc502c992ad446e3d7a5dfbe311567a96
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
|
||||
- name: Build release images
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/arm64, linux/amd64, linux/arm/v7, linux/arm/v6, linux/386
|
||||
push: true
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
tags: |
|
||||
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
|
||||
- name: Generate SBOM
|
||||
run: |
|
||||
.tmp/syft ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }} -o spdx > kured.sbom
|
||||
|
||||
- name: Sign and attest artifacts
|
||||
run: |
|
||||
.tmp/cosign sign -f -r ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
|
||||
.tmp/cosign sign-blob --output-signature kured.sbom.sig kured.sbom
|
||||
|
||||
.tmp/cosign attest -f --type spdx --predicate kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
.tmp/cosign attach sbom --type spdx --sbom kured.sbom ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tags.outputs.version }}
|
||||
env:
|
||||
COSIGN_EXPERIMENTAL: 1
|
||||
77
.github/workflows/periodics-daily.yaml
vendored
77
.github/workflows/periodics-daily.yaml
vendored
@@ -1,77 +0,0 @@
|
||||
name: Daily jobs
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "30 1 * * *"
|
||||
|
||||
jobs:
|
||||
periodics-gotest:
|
||||
name: Run go tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout
|
||||
uses: actions/checkout@v3
|
||||
- name: run tests
|
||||
run: go test -json ./... > test.json
|
||||
- name: Annotate tests
|
||||
if: always()
|
||||
uses: guyarb/golang-test-annoations@v0.6.0
|
||||
with:
|
||||
test-results: test.json
|
||||
|
||||
periodics-mark-stale:
|
||||
name: Mark stale issues and PRs
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# Stale by default waits for 60 days before marking PR/issues as stale, and closes them after 21 days.
|
||||
# Do not expire the first issues that would allow the community to grow.
|
||||
- uses: actions/stale@v7
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-issue-message: 'This issue was automatically considered stale due to lack of activity. Please update it and/or join our slack channels to promote it, before it automatically closes (in 7 days).'
|
||||
stale-pr-message: 'This PR was automatically considered stale due to lack of activity. Please refresh it and/or join our slack channels to highlight it, before it automatically closes (in 7 days).'
|
||||
stale-issue-label: 'no-issue-activity'
|
||||
stale-pr-label: 'no-pr-activity'
|
||||
exempt-issue-labels: 'good first issue,keep'
|
||||
days-before-close: 21
|
||||
|
||||
check-docs-links:
|
||||
name: Check docs for incorrect links
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Link Checker
|
||||
uses: lycheeverse/lychee-action@4dcb8bee2a0a4531cba1a1f392c54e8375d6dd81
|
||||
env:
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||
with:
|
||||
args: --verbose --no-progress '*.md' '*.yaml' '*/*/*.go' --exclude-link-local
|
||||
fail: true
|
||||
|
||||
vuln-scan:
|
||||
name: Build image and scan it against known vulnerabilities
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Ensure go version
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Setup GoReleaser
|
||||
run: make bootstrap-tools
|
||||
- name: Find current tag version
|
||||
run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
id: tags
|
||||
- name: Build artifacts
|
||||
run: VERSION="${{ steps.tags.outputs.sha_short }}" make image
|
||||
- uses: Azure/container-scan@v0
|
||||
env:
|
||||
# See https://github.com/goodwithtech/dockle/issues/188
|
||||
DOCKLE_HOST: "unix:///var/run/docker.sock"
|
||||
with:
|
||||
image-name: ghcr.io/${{ github.repository }}:${{ steps.tags.outputs.sha_short }}
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -1,5 +0,0 @@
|
||||
cmd/kured/kured
|
||||
vendor
|
||||
build
|
||||
dist
|
||||
.tmp
|
||||
@@ -1,32 +0,0 @@
|
||||
project_name: kured
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
builds:
|
||||
- main: ./cmd/kured
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
- arm64
|
||||
- arm
|
||||
- "386"
|
||||
goarm:
|
||||
- "6"
|
||||
- "7"
|
||||
ldflags:
|
||||
- -s -w -X main.version={{ if .IsSnapshot }}{{ .ShortCommit }}{{ else }}{{ .Version }}{{ end }}
|
||||
mod_timestamp: "{{ .CommitTimestamp }}"
|
||||
flags:
|
||||
- -trimpath
|
||||
|
||||
snapshot:
|
||||
name_template: "{{ .ShortCommit }}"
|
||||
|
||||
release:
|
||||
disable: true
|
||||
|
||||
changelog:
|
||||
skip: true
|
||||
@@ -1,4 +0,0 @@
|
||||
app.fossa.com
|
||||
cluster.local
|
||||
hooks.slack.com
|
||||
localhost
|
||||
@@ -1,3 +0,0 @@
|
||||
## Kured Community Code of Conduct
|
||||
|
||||
Kured follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
|
||||
242
CONTRIBUTING.md
242
CONTRIBUTING.md
@@ -1,242 +0,0 @@
|
||||
# Developing `kured`
|
||||
|
||||
We love contributions to `kured`, no matter if you are [helping out on
|
||||
Slack][slack], reporting or triaging [issues][issues] or contributing code
|
||||
to `kured`.
|
||||
|
||||
In any case, it will make sense to familiarise yourself with the main
|
||||
[README][readme] to understand the different features and options, which is
|
||||
helpful for testing. The "building" section in particular makes sense if
|
||||
you are planning to contribute code.
|
||||
|
||||
[slack]: README.md#getting-help
|
||||
[issues]: https://github.com/kubereboot/kured/issues
|
||||
[readme]: README.md
|
||||
|
||||
## Certificate of Origin
|
||||
|
||||
By contributing to this project you agree to the Developer Certificate of
|
||||
Origin (DCO). This document was created by the Linux Kernel community and is a
|
||||
simple statement that you, as a contributor, have the legal right to make the
|
||||
contribution.
|
||||
|
||||
We require all commits to be signed. By signing off with your signature, you
|
||||
certify that you wrote the patch or otherwise have the right to contribute the
|
||||
material by the rules of the [DCO](DCO):
|
||||
|
||||
`Signed-off-by: Jane Doe <jane.doe@example.com>`
|
||||
|
||||
The signature must contain your real name
|
||||
(sorry, no pseudonyms or anonymous contributions)
|
||||
If your `user.name` and `user.email` are configured in your Git config,
|
||||
you can sign your commit automatically with `git commit -s`.
|
||||
|
||||
## Kured Repositories
|
||||
|
||||
All Kured repositories are kept under <https://github.com/kubereboot>. To find the code and work on the individual pieces that make Kured, here is our overview:
|
||||
|
||||
| Repositories | Contents |
|
||||
| --------------------------------------- | ------------------------- |
|
||||
| <https://github.com/kubereboot/kured> | Kured operator itself |
|
||||
| <https://github.com/kubereboot/charts> | Helm chart |
|
||||
| <https://github.com/kubereboot/website> | website and documentation |
|
||||
|
||||
## Regular development activities
|
||||
|
||||
### Prepare environment
|
||||
|
||||
Please run `make bootstrap-tools` once on a fresh repository clone to download several needed tools, e.g. GoReleaser.
|
||||
|
||||
### Updating k8s support
|
||||
|
||||
Whenever we want to update e.g. the `kubectl` or `client-go` dependencies,
|
||||
some RBAC changes might be necessary too.
|
||||
|
||||
This is what it took to support Kubernetes 1.14:
|
||||
<https://github.com/kubereboot/kured/pull/75>
|
||||
|
||||
That the process can be more involved based on kubernetes changes.
|
||||
For example, k8s 1.10 changes to apps triggered the following commits:
|
||||
|
||||
b3f9ddf: Bump client-go for optimum k8s 1.10 compatibility
|
||||
bc3f28d: Move deployment manifest to apps/v1
|
||||
908998a: Update RBAC permissions for kubectl v1.10.3
|
||||
efbb0c3: Document version compatibility in release notes
|
||||
5731b98: Add warning to Dockerfile re: upgrading kubectl
|
||||
|
||||
Search the git log for inspiration for your cases.
|
||||
|
||||
Please update our `.github/workflows` with the new k8s images, starting by
|
||||
the creation of a `.github/kind-cluster-<version>.yaml`, then updating
|
||||
our workflows with the new versions.
|
||||
|
||||
Once you updated everything, make sure you update the support matrix on
|
||||
the main [README][readme] as well.
|
||||
|
||||
### Updating other dependencies
|
||||
|
||||
Dependabot proposes changes in our go.mod/go.sum.
|
||||
Some of those changes are covered by CI testing, some are not.
|
||||
|
||||
Please make sure to test those not covered by CI (mostly the integration
|
||||
with other tools) manually before merging.
|
||||
|
||||
### Review periodic jobs
|
||||
|
||||
We run periodic jobs (see also Automated testing section of this documentation).
|
||||
Those should be monitored for failures.
|
||||
|
||||
If a failure happen in periodics, something terribly wrong must have happened
|
||||
(or github is failing at the creation of a kind cluster). Please monitor those
|
||||
failures carefully.
|
||||
|
||||
### Introducing new features
|
||||
|
||||
When you introduce a new feature, the kured team expects you to have tested
|
||||
your change thoroughly. If possible, include all the necessary testing in your change.
|
||||
|
||||
If your change involves a user facing change (change in flags of kured for example),
|
||||
please include expose your new feature in our default manifest (`kured-ds.yaml`),
|
||||
as a comment.
|
||||
|
||||
Our release manifests and helm charts are our stable interfaces.
|
||||
Any user facing changes will therefore have to wait for a release before being
|
||||
exposed to our users.
|
||||
|
||||
This also means that when you expose a new feature, you should create another PR
|
||||
for your changes in <https://github.com/kubereboot/charts> to make your feature
|
||||
available at the next kured version for helm users.
|
||||
|
||||
In the charts PR, you can directly bump the appVersion to the next minor version
|
||||
(you are introducing a new feature, which requires a bump of the minor number.
|
||||
For example, if current appVersion is 1.6.x, make sure you update your appVersion
|
||||
to 1.7.0). It allows us to have an easy view of what we land each release.
|
||||
|
||||
Do not hesitate to increase the test coverage for your feature, whether it's unit
|
||||
testing to full functional testing (even using helm charts)
|
||||
|
||||
### Increasing test coverage
|
||||
|
||||
We are welcoming any change to increase our test coverage.
|
||||
See also our github issues for the label `testing`.
|
||||
|
||||
## Automated testing
|
||||
|
||||
Our CI is covered by github actions.
|
||||
You can see their contents in .github/workflows.
|
||||
|
||||
We currently run:
|
||||
|
||||
- go tests and lint
|
||||
- `shellcheck`
|
||||
- a check for dead links in our docs
|
||||
- a security check against our base image (alpine)
|
||||
- a deep functional test using our manifests on all supported k8s versions
|
||||
|
||||
To test your code manually, follow the section Manual testing.
|
||||
|
||||
## Manual (release) testing
|
||||
|
||||
Before `kured` is released, we want to make sure it still works fine on the
|
||||
previous, current and next minor version of Kubernetes (with respect to the
|
||||
`client-go` & `kubectl` dependencies in use). For local testing e.g.
|
||||
`minikube` or `kind` can be sufficient. This will allow you to catch issues
|
||||
that might not have been tested in our CI, like integration with other tools,
|
||||
or your specific use case.
|
||||
|
||||
Deploy kured in your test scenario, make sure you pass the right `image`,
|
||||
update the e.g. `period` and `reboot-days` options, so you get immediate
|
||||
results, if you login to a node and run:
|
||||
|
||||
```console
|
||||
sudo touch /var/run/reboot-required
|
||||
```
|
||||
|
||||
### Example of golang testing
|
||||
|
||||
Please run `make test`. You should have `golint` installed.
|
||||
|
||||
### Example of testing with `minikube`
|
||||
|
||||
A test-run with `minikube` could look like this:
|
||||
|
||||
```console
|
||||
# start minikube
|
||||
minikube start --driver=kvm2 --kubernetes-version <k8s-release>
|
||||
|
||||
# build kured image and publish to registry accessible by minikube
|
||||
make image minikube-publish
|
||||
|
||||
# edit kured-ds.yaml to
|
||||
# - point to new image
|
||||
# - change e.g. period and reboot-days option for immediate results
|
||||
|
||||
minikube kubectl -- apply -f kured-rbac.yaml
|
||||
minikube kubectl -- apply -f kured-ds.yaml
|
||||
minikube kubectl -- logs daemonset.apps/kured -n kube-system -f
|
||||
|
||||
# In separate terminal
|
||||
minikube ssh
|
||||
sudo touch /var/run/reboot-required
|
||||
minikube logs -f
|
||||
```
|
||||
|
||||
Now check for the 'Commanding reboot' message and minikube going down.
|
||||
|
||||
Unfortunately as of today, you are going to run into
|
||||
<https://github.com/kubernetes/minikube/issues/2874>. This means that
|
||||
minikube won't come back easily. You will need to start minikube again.
|
||||
Then you can check for the lock release.
|
||||
|
||||
### Example of testing with `kind`
|
||||
|
||||
A test-run with `kind` could look like this:
|
||||
|
||||
```console
|
||||
# create kind cluster
|
||||
kind create cluster --config .github/kind-cluster-<k8s-version>.yaml
|
||||
|
||||
# create reboot required files on pre-defined kind nodes
|
||||
./tests/kind/create-reboot-sentinels.sh
|
||||
|
||||
# check if reboot is working fine
|
||||
./tests/kind/follow-coordinated-reboot.sh
|
||||
|
||||
```
|
||||
|
||||
## Publishing a new kured release
|
||||
|
||||
### Prepare Documentation
|
||||
|
||||
Check that `README.md` has an updated compatibility matrix and that the
|
||||
url in the `kubectl` incantation (under "Installation") is updated to the
|
||||
new version you want to release.
|
||||
|
||||
### Create a tag on the repo
|
||||
|
||||
Before going further, we should freeze the code for a release, by
|
||||
tagging the code. The Github-Action should start a new job and push
|
||||
the new image to the registry.
|
||||
|
||||
### Create the combined manifest
|
||||
|
||||
Now create the `kured-<release>-dockerhub.yaml` for e.g. `1.3.0`:
|
||||
|
||||
```sh
|
||||
VERSION=1.3.0
|
||||
MANIFEST="kured-$VERSION-dockerhub.yaml"
|
||||
make DH_ORG="kubereboot" VERSION="${VERSION}" manifest
|
||||
cat kured-rbac.yaml > "$MANIFEST"
|
||||
cat kured-ds.yaml >> "$MANIFEST"
|
||||
```
|
||||
|
||||
### Publish release artifacts
|
||||
|
||||
Now you can head to the Github UI, use the version number as tag and upload the
|
||||
`kured-<release>-dockerhub.yaml` file.
|
||||
|
||||
Please describe what's new and noteworthy in the release notes, list the PRs
|
||||
that landed and give a shout-out to everyone who contributed.
|
||||
|
||||
Please also note down on which releases the upcoming `kured` release was
|
||||
tested on. (Check old release notes if you're unsure.)
|
||||
36
DCO
36
DCO
@@ -1,36 +0,0 @@
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
@@ -1 +0,0 @@
|
||||
This file was moved to [CONTRIBUTING.md](CONTRIBUTING.md).
|
||||
25
Dockerfile
25
Dockerfile
@@ -1,25 +0,0 @@
|
||||
FROM --platform=$TARGETPLATFORM alpine:3.17.1 as bin
|
||||
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
|
||||
COPY dist/ /dist
|
||||
RUN set -ex \
|
||||
&& case "${TARGETARCH}" in \
|
||||
amd64) \
|
||||
SUFFIX="_v1" \
|
||||
;; \
|
||||
arm) \
|
||||
SUFFIX="_${TARGETVARIANT:1}" \
|
||||
;; \
|
||||
*) \
|
||||
SUFFIX="" \
|
||||
;; \
|
||||
esac \
|
||||
&& cp /dist/kured_${TARGETOS}_${TARGETARCH}${SUFFIX}/kured /dist/kured;
|
||||
|
||||
FROM --platform=$TARGETPLATFORM alpine:3.17.1
|
||||
RUN apk update --no-cache && apk upgrade --no-cache && apk add --no-cache ca-certificates tzdata
|
||||
COPY --from=bin /dist/kured /usr/bin/kured
|
||||
ENTRYPOINT ["/usr/bin/kured"]
|
||||
112
GOVERNANCE.md
112
GOVERNANCE.md
@@ -1,112 +0,0 @@
|
||||
# Project Governance
|
||||
|
||||
- [Values](#values)
|
||||
- [Maintainers](#maintainers)
|
||||
- [Becoming a Maintainer](#becoming-a-maintainer)
|
||||
- [Meetings](#meetings)
|
||||
- [Code of Conduct Enforcement](#code-of-conduct)
|
||||
- [Voting](#voting)
|
||||
|
||||
## Values
|
||||
|
||||
The Kured project and its leadership embrace the following values:
|
||||
|
||||
- Openness: Communication and decision-making happens in the open and is discoverable for future
|
||||
reference. As much as possible, all discussions and work take place in public
|
||||
forums and open repositories.
|
||||
|
||||
- Fairness: All stakeholders have the opportunity to provide feedback and submit
|
||||
contributions, which will be considered on their merits.
|
||||
|
||||
- Community over Product or Company: Sustaining and growing our community takes
|
||||
priority over shipping code or sponsors' organizational goals. Each
|
||||
contributor participates in the project as an individual.
|
||||
|
||||
- Inclusivity: We innovate through different perspectives and skill sets, which
|
||||
can only be accomplished in a welcoming and respectful environment.
|
||||
|
||||
- Participation: Responsibilities within the project are earned through
|
||||
participation, and there is a clear path up the contributor ladder into leadership
|
||||
positions.
|
||||
|
||||
- Consensus: Whether or not wider input is required, the Kured community believes that
|
||||
the best decisions are reached through Consensus
|
||||
<https://en.wikipedia.org/wiki/Consensus_decision-making>.
|
||||
|
||||
## Maintainers
|
||||
|
||||
Kured Maintainers have write access to the [project GitHub
|
||||
organisation](https://github.com/kubereboot). They can merge their own patches or patches
|
||||
from others. The current maintainers can be found in [MAINTAINERS][maintainers-file].
|
||||
Maintainers collectively manage the project's resources and contributors.
|
||||
|
||||
This privilege is granted with some expectation of responsibility: maintainers
|
||||
are people who care about the Kured project and want to help it grow and
|
||||
improve. A maintainer is not just someone who can make changes, but someone who
|
||||
has demonstrated their ability to collaborate with the team, get the most
|
||||
knowledgeable people to review code and docs, contribute high-quality code, and
|
||||
follow through to fix issues (in code or tests).
|
||||
|
||||
A maintainer is a contributor to the project's success and a citizen helping
|
||||
the project succeed.
|
||||
|
||||
## Becoming a Maintainer
|
||||
|
||||
To become a Maintainer you need to demonstrate the following:
|
||||
|
||||
- commitment to the project:
|
||||
- participate in discussions, contributions, code and documentation reviews
|
||||
for 3 months or more and participate in Slack discussions and meetings
|
||||
if possible,
|
||||
- perform reviews for 5 non-trivial pull requests,
|
||||
- contribute 5 non-trivial pull requests and have them merged,
|
||||
- ability to write quality code and/or documentation,
|
||||
- ability to collaborate with the team,
|
||||
- understanding of how the team works (policies, processes for testing and code review, etc),
|
||||
- understanding of the project's code base and coding and documentation style.
|
||||
|
||||
We realise that everybody brings different abilities and qualities to the team, that's
|
||||
why we are willing to change the rules somewhat depending on the circumstances.
|
||||
|
||||
A new Maintainer can apply by proposing a PR to the [MAINTAINERS
|
||||
file][maintainers-file]. A simple majority vote of existing Maintainers
|
||||
approves the application.
|
||||
|
||||
Maintainers who are selected will be granted the necessary GitHub rights,
|
||||
and invited to the [private maintainer mailing list][private-list].
|
||||
|
||||
## Meetings
|
||||
|
||||
Time zones permitting, Maintainers are expected to participate in the public
|
||||
developer meeting, details can be found [here][meeting-agenda].
|
||||
|
||||
Maintainers will also have closed meetings in order to discuss security reports
|
||||
or Code of Conduct violations. Such meetings should be scheduled by any
|
||||
Maintainer on receipt of a security issue or CoC report. All current Maintainers
|
||||
must be invited to such closed meetings, except for any Maintainer who is
|
||||
accused of a CoC violation.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
[Code of Conduct](./CODE_OF_CONDUCT.md) violations by community members will
|
||||
be discussed and resolved on the [private Maintainer mailing list][private-list].
|
||||
If the reported CoC violator is a Maintainer, the Maintainers will instead
|
||||
designate two Maintainers to work with CNCF staff in resolving the report.
|
||||
|
||||
## Voting
|
||||
|
||||
While most business in Kured is conducted by "lazy consensus", periodically
|
||||
the Maintainers may need to vote on specific actions or changes.
|
||||
A vote can be taken in [kured issues labeled 'decision'][decision-issues] or
|
||||
[the private Maintainer mailing list][private-list] for security or conduct
|
||||
matters. Votes may also be taken at [the developer meeting][meeting-agenda].
|
||||
Any Maintainer may demand a vote be taken.
|
||||
|
||||
Most votes require a simple majority of all Maintainers to succeed. Maintainers
|
||||
can be removed by a 2/3 majority vote of all Maintainers, and changes to this
|
||||
Governance require a 2/3 vote of all Maintainers.
|
||||
|
||||
[maintainers-file]: ./MAINTAINERS
|
||||
[private-list]: cncf-kured-maintainers@lists.cncf.io
|
||||
[meeting-agenda]: https://docs.google.com/document/d/1bsHTjHhqaaZ7yJnXF6W8c89UB_yn-OoSZEmDnIP34n8/edit#
|
||||
[decision-issues]: https://github.com/kubereboot/kured/labels/decision
|
||||
191
LICENSE
191
LICENSE
@@ -1,191 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2017 Weaveworks Ltd.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
@@ -1,5 +0,0 @@
|
||||
Christian Kotzbauer <christian.kotzbauer@gmail.com> (@ckotzbauer)
|
||||
Daniel Holbach <daniel@weave.works> (@dholbach)
|
||||
Hidde Beydals <hidde@weave.works> (@hiddeco)
|
||||
Jack Francis <jackfrancis@gmail.com> (@jackfrancis)
|
||||
Jean-Philippe Evrard <open-source@a.spamming.party> (@evrardjp)
|
||||
53
Makefile
53
Makefile
@@ -1,53 +0,0 @@
|
||||
.DEFAULT: all
|
||||
.PHONY: all clean image minikube-publish manifest test kured-all
|
||||
|
||||
TEMPDIR=./.tmp
|
||||
GORELEASER_CMD=$(TEMPDIR)/goreleaser
|
||||
DH_ORG=kubereboot
|
||||
VERSION=$(shell git rev-parse --short HEAD)
|
||||
SUDO=$(shell docker info >/dev/null 2>&1 || echo "sudo -E")
|
||||
|
||||
all: image
|
||||
|
||||
$(TEMPDIR):
|
||||
mkdir -p $(TEMPDIR)
|
||||
|
||||
.PHONY: bootstrap-tools
|
||||
bootstrap-tools: $(TEMPDIR)
|
||||
VERSION=v1.11.4 TMPDIR=.tmp bash .github/scripts/goreleaser-install.sh
|
||||
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sh -s -- -b .tmp v0.58.0
|
||||
curl -sSfL https://github.com/sigstore/cosign/releases/download/v1.12.1/cosign-linux-amd64 -o .tmp/cosign
|
||||
chmod +x .tmp/goreleaser .tmp/cosign .tmp/syft
|
||||
|
||||
clean:
|
||||
rm -rf ./dist
|
||||
|
||||
kured:
|
||||
$(GORELEASER_CMD) build --rm-dist --single-target --snapshot
|
||||
|
||||
kured-all:
|
||||
$(GORELEASER_CMD) build --rm-dist --snapshot
|
||||
|
||||
kured-release-tag:
|
||||
$(GORELEASER_CMD) release --rm-dist
|
||||
|
||||
kured-release-snapshot:
|
||||
$(GORELEASER_CMD) release --rm-dist --snapshot
|
||||
|
||||
image: kured
|
||||
$(SUDO) docker buildx build --load -t ghcr.io/$(DH_ORG)/kured:$(VERSION) .
|
||||
|
||||
minikube-publish: image
|
||||
$(SUDO) docker save ghcr.io/$(DH_ORG)/kured | (eval $$(minikube docker-env) && docker load)
|
||||
|
||||
manifest:
|
||||
sed -i "s#image: ghcr.io/.*kured.*#image: ghcr.io/$(DH_ORG)/kured:$(VERSION)#g" kured-ds.yaml
|
||||
echo "Please generate combined manifest if necessary"
|
||||
|
||||
test:
|
||||
echo "Running go tests"
|
||||
go test ./...
|
||||
echo "Running golint on pkg"
|
||||
golint ./pkg/...
|
||||
echo "Running golint on cmd"
|
||||
golint ./cmd/...
|
||||
72
README.md
72
README.md
@@ -1,66 +1,20 @@
|
||||
# kured - Kubernetes Reboot Daemon
|
||||
# Kured Helm Repository
|
||||
|
||||
[](https://artifacthub.io/packages/helm/kured/kured)
|
||||
[](https://app.fossa.com/projects/git%2Bgithub.com%2Fkubereboot%2Fkured?ref=badge_shield)
|
||||
[](https://clomonitor.io/projects/cncf/kured)
|
||||

|
||||
|
||||
<img src="https://github.com/kubereboot/website/raw/main/static/img/kured.png" width="200" align="right"/>
|
||||
Caution! We are currently in the middle of the move to a different github organisation.
|
||||
Here is the info for the new organisation.
|
||||
|
||||
- [kured - Kubernetes Reboot Daemon](#kured---kubernetes-reboot-daemon)
|
||||
- [Introduction](#introduction)
|
||||
- [Documentation](#documentation)
|
||||
- [Getting Help](#getting-help)
|
||||
- [Trademarks](#trademarks)
|
||||
- [License](#license)
|
||||
Add Kured repository to Helm repos:
|
||||
|
||||
## Introduction
|
||||
```console
|
||||
helm repo add kubereboot https://kubereboot.github.io/charts/
|
||||
```
|
||||
|
||||
Kured (KUbernetes REboot Daemon) is a Kubernetes daemonset that
|
||||
performs safe automatic node reboots when the need to do so is
|
||||
indicated by the package management system of the underlying OS.
|
||||
## Install Kured
|
||||
|
||||
- Watches for the presence of a reboot sentinel file e.g. `/var/run/reboot-required`
|
||||
or the successful run of a sentinel command.
|
||||
- Utilises a lock in the API server to ensure only one node reboots at
|
||||
a time
|
||||
- Optionally defers reboots in the presence of active Prometheus alerts or selected pods
|
||||
- Cordons & drains worker nodes before reboot, uncordoning them after
|
||||
```console
|
||||
helm install my-release kubereboot/kured
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
Find all our docs on <https://kured.dev>:
|
||||
|
||||
- [All Kured Documentation](https://kured.dev/docs/)
|
||||
- [Installing Kured](https://kured.dev/docs/installation/)
|
||||
- [Configuring Kured](https://kured.dev/docs/configuration/)
|
||||
- [Operating Kured](https://kured.dev/docs/operation/)
|
||||
- [Developing Kured](https://kured.dev/docs/development/)
|
||||
|
||||
And there's much more!
|
||||
|
||||
## Getting Help
|
||||
|
||||
If you have any questions about, feedback for or problems with `kured`:
|
||||
|
||||
- Invite yourself to the <a href="https://slack.cncf.io/" target="_blank">CNCF Slack</a>.
|
||||
- Ask a question on the [#kured](https://cloud-native.slack.com/archives/kured) slack channel.
|
||||
- [File an issue](https://github.com/kubereboot/kured/issues/new).
|
||||
- Join us in [our monthly meeting](https://docs.google.com/document/d/1bsHTjHhqaaZ7yJnXF6W8c89UB_yn-OoSZEmDnIP34n8/edit#),
|
||||
every first Wednesday of the month at 16:00 UTC.
|
||||
- You might want to [join the kured-dev mailing list](https://lists.cncf.io/g/cncf-kured-dev) as well.
|
||||
|
||||
We follow the [CNCF Code of Conduct](CODE_OF_CONDUCT.md).
|
||||
|
||||
Your feedback is always welcome!
|
||||
|
||||
## Trademarks
|
||||
|
||||
**Kured is a [Cloud Native Computing Foundation](https://cncf.io/) Sandbox project.**
|
||||
|
||||

|
||||
|
||||
The Linux Foundation® (TLF) has registered trademarks and uses trademarks. For a list of TLF trademarks, see [Trademark Usage](https://www.linuxfoundation.org/trademark-usage/).
|
||||
|
||||
## License
|
||||
|
||||
[](https://app.fossa.com/projects/git%2Bgithub.com%2Fkubereboot%2Fkured?ref=badge_large)
|
||||
For more details on installing Kured please see the [chart readme](https://github.com/kubereboot/charts/tree/main/charts/kured).
|
||||
|
||||
@@ -1,837 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
papi "github.com/prometheus/client_golang/api"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
kubectldrain "k8s.io/kubectl/pkg/drain"
|
||||
|
||||
"github.com/google/shlex"
|
||||
|
||||
shoutrrr "github.com/containrrr/shoutrrr"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/kubereboot/kured/pkg/alerts"
|
||||
"github.com/kubereboot/kured/pkg/daemonsetlock"
|
||||
"github.com/kubereboot/kured/pkg/delaytick"
|
||||
"github.com/kubereboot/kured/pkg/taints"
|
||||
"github.com/kubereboot/kured/pkg/timewindow"
|
||||
)
|
||||
|
||||
var (
|
||||
version = "unreleased"
|
||||
|
||||
// Command line flags
|
||||
forceReboot bool
|
||||
drainTimeout time.Duration
|
||||
rebootDelay time.Duration
|
||||
period time.Duration
|
||||
drainGracePeriod int
|
||||
skipWaitForDeleteTimeoutSeconds int
|
||||
dsNamespace string
|
||||
dsName string
|
||||
lockAnnotation string
|
||||
lockTTL time.Duration
|
||||
lockReleaseDelay time.Duration
|
||||
prometheusURL string
|
||||
preferNoScheduleTaintName string
|
||||
alertFilter *regexp.Regexp
|
||||
alertFiringOnly bool
|
||||
rebootSentinelFile string
|
||||
rebootSentinelCommand string
|
||||
notifyURL string
|
||||
slackHookURL string
|
||||
slackUsername string
|
||||
slackChannel string
|
||||
messageTemplateDrain string
|
||||
messageTemplateReboot string
|
||||
messageTemplateUncordon string
|
||||
podSelectors []string
|
||||
rebootCommand string
|
||||
logFormat string
|
||||
preRebootNodeLabels []string
|
||||
postRebootNodeLabels []string
|
||||
nodeID string
|
||||
|
||||
rebootDays []string
|
||||
rebootStart string
|
||||
rebootEnd string
|
||||
timezone string
|
||||
annotateNodes bool
|
||||
|
||||
// Metrics
|
||||
rebootRequiredGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Subsystem: "kured",
|
||||
Name: "reboot_required",
|
||||
Help: "OS requires reboot due to software updates.",
|
||||
}, []string{"node"})
|
||||
)
|
||||
|
||||
const (
|
||||
// KuredNodeLockAnnotation is the canonical string value for the kured node-lock annotation
|
||||
KuredNodeLockAnnotation string = "weave.works/kured-node-lock"
|
||||
// KuredRebootInProgressAnnotation is the canonical string value for the kured reboot-in-progress annotation
|
||||
KuredRebootInProgressAnnotation string = "weave.works/kured-reboot-in-progress"
|
||||
// KuredMostRecentRebootNeededAnnotation is the canonical string value for the kured most-recent-reboot-needed annotation
|
||||
KuredMostRecentRebootNeededAnnotation string = "weave.works/kured-most-recent-reboot-needed"
|
||||
// EnvPrefix The environment variable prefix of all environment variables bound to our command line flags.
|
||||
EnvPrefix = "KURED"
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(rebootRequiredGauge)
|
||||
}
|
||||
|
||||
func main() {
|
||||
cmd := NewRootCommand()
|
||||
|
||||
if err := cmd.Execute(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// NewRootCommand construct the Cobra root command
// It wires bindViper (env-var binding), flagCheck (flag validation) and root
// (the daemon entry point), and declares every persistent command-line flag.
func NewRootCommand() *cobra.Command {
	rootCmd := &cobra.Command{
		Use:               "kured",
		Short:             "Kubernetes Reboot Daemon",
		PersistentPreRunE: bindViper,
		PreRun:            flagCheck,
		Run:               root}

	// Node identity, reboot behaviour and drain tuning.
	rootCmd.PersistentFlags().StringVar(&nodeID, "node-id", "",
		"node name kured runs on, should be passed down from spec.nodeName via KURED_NODE_ID environment variable")
	rootCmd.PersistentFlags().BoolVar(&forceReboot, "force-reboot", false,
		"force a reboot even if the drain fails or times out")
	rootCmd.PersistentFlags().IntVar(&drainGracePeriod, "drain-grace-period", -1,
		"time in seconds given to each pod to terminate gracefully, if negative, the default value specified in the pod will be used")
	rootCmd.PersistentFlags().IntVar(&skipWaitForDeleteTimeoutSeconds, "skip-wait-for-delete-timeout", 0,
		"when seconds is greater than zero, skip waiting for the pods whose deletion timestamp is older than N seconds while draining a node")
	rootCmd.PersistentFlags().DurationVar(&drainTimeout, "drain-timeout", 0,
		"timeout after which the drain is aborted (default: 0, infinite time)")
	rootCmd.PersistentFlags().DurationVar(&rebootDelay, "reboot-delay", 0,
		"delay reboot for this duration (default: 0, disabled)")
	rootCmd.PersistentFlags().DurationVar(&period, "period", time.Minute*60,
		"sentinel check period")

	// Cluster-wide reboot lock (held as an annotation on the kured daemonset).
	rootCmd.PersistentFlags().StringVar(&dsNamespace, "ds-namespace", "kube-system",
		"namespace containing daemonset on which to place lock")
	rootCmd.PersistentFlags().StringVar(&dsName, "ds-name", "kured",
		"name of daemonset on which to place lock")
	rootCmd.PersistentFlags().StringVar(&lockAnnotation, "lock-annotation", KuredNodeLockAnnotation,
		"annotation in which to record locking node")
	rootCmd.PersistentFlags().DurationVar(&lockTTL, "lock-ttl", 0,
		"expire lock annotation after this duration (default: 0, disabled)")
	rootCmd.PersistentFlags().DurationVar(&lockReleaseDelay, "lock-release-delay", 0,
		"delay lock release for this duration (default: 0, disabled)")

	// Reboot blockers: Prometheus alerts and sentinel configuration.
	rootCmd.PersistentFlags().StringVar(&prometheusURL, "prometheus-url", "",
		"Prometheus instance to probe for active alerts")
	rootCmd.PersistentFlags().Var(&regexpValue{&alertFilter}, "alert-filter-regexp",
		"alert names to ignore when checking for active alerts")
	rootCmd.PersistentFlags().BoolVar(&alertFiringOnly, "alert-firing-only", false,
		"only consider firing alerts when checking for active alerts")
	rootCmd.PersistentFlags().StringVar(&rebootSentinelFile, "reboot-sentinel", "/var/run/reboot-required",
		"path to file whose existence triggers the reboot command")
	rootCmd.PersistentFlags().StringVar(&preferNoScheduleTaintName, "prefer-no-schedule-taint", "",
		"Taint name applied during pending node reboot (to prevent receiving additional pods from other rebooting nodes). Disabled by default. Set e.g. to \"weave.works/kured-node-reboot\" to enable tainting.")
	rootCmd.PersistentFlags().StringVar(&rebootSentinelCommand, "reboot-sentinel-command", "",
		"command for which a zero return code will trigger a reboot command")
	rootCmd.PersistentFlags().StringVar(&rebootCommand, "reboot-command", "/bin/systemctl reboot",
		"command to run when a reboot is required")

	// Notifications (shoutrrr notify-url, plus deprecated Slack-specific flags).
	rootCmd.PersistentFlags().StringVar(&slackHookURL, "slack-hook-url", "",
		"slack hook URL for reboot notifications [deprecated in favor of --notify-url]")
	rootCmd.PersistentFlags().StringVar(&slackUsername, "slack-username", "kured",
		"slack username for reboot notifications")
	rootCmd.PersistentFlags().StringVar(&slackChannel, "slack-channel", "",
		"slack channel for reboot notifications")
	rootCmd.PersistentFlags().StringVar(&notifyURL, "notify-url", "",
		"notify URL for reboot notifications (cannot use with --slack-hook-url flags)")
	rootCmd.PersistentFlags().StringVar(&messageTemplateUncordon, "message-template-uncordon", "Node %s rebooted & uncordoned successfully!",
		"message template used to notify about a node being successfully uncordoned")
	rootCmd.PersistentFlags().StringVar(&messageTemplateDrain, "message-template-drain", "Draining node %s",
		"message template used to notify about a node being drained")
	rootCmd.PersistentFlags().StringVar(&messageTemplateReboot, "message-template-reboot", "Rebooting node %s",
		"message template used to notify about a node being rebooted")

	rootCmd.PersistentFlags().StringArrayVar(&podSelectors, "blocking-pod-selector", nil,
		"label selector identifying pods whose presence should prevent reboots")

	// Reboot scheduling window.
	rootCmd.PersistentFlags().StringSliceVar(&rebootDays, "reboot-days", timewindow.EveryDay,
		"schedule reboot on these days")
	rootCmd.PersistentFlags().StringVar(&rebootStart, "start-time", "0:00",
		"schedule reboot only after this time of day")
	rootCmd.PersistentFlags().StringVar(&rebootEnd, "end-time", "23:59:59",
		"schedule reboot only before this time of day")
	rootCmd.PersistentFlags().StringVar(&timezone, "time-zone", "UTC",
		"use this timezone for schedule inputs")

	rootCmd.PersistentFlags().BoolVar(&annotateNodes, "annotate-nodes", false,
		"if set, the annotations 'weave.works/kured-reboot-in-progress' and 'weave.works/kured-most-recent-reboot-needed' will be given to nodes undergoing kured reboots")

	rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "text",
		"use text or json log format")

	// Labels applied around the cordon/uncordon lifecycle.
	rootCmd.PersistentFlags().StringSliceVar(&preRebootNodeLabels, "pre-reboot-node-labels", nil,
		"labels to add to nodes before cordoning")
	rootCmd.PersistentFlags().StringSliceVar(&postRebootNodeLabels, "post-reboot-node-labels", nil,
		"labels to add to nodes after uncordoning")

	return rootCmd
}
|
||||
|
||||
// func that checks for deprecated slack-notification-related flags and node labels that do not match
|
||||
func flagCheck(cmd *cobra.Command, args []string) {
|
||||
if slackHookURL != "" && notifyURL != "" {
|
||||
log.Warnf("Cannot use both --notify-url and --slack-hook-url flags. Kured will use --notify-url flag only...")
|
||||
}
|
||||
if notifyURL != "" {
|
||||
notifyURL = stripQuotes(notifyURL)
|
||||
} else if slackHookURL != "" {
|
||||
slackHookURL = stripQuotes(slackHookURL)
|
||||
log.Warnf("Deprecated flag(s). Please use --notify-url flag instead.")
|
||||
trataURL, err := url.Parse(slackHookURL)
|
||||
if err != nil {
|
||||
log.Warnf("slack-hook-url is not properly formatted... no notification will be sent: %v\n", err)
|
||||
}
|
||||
if len(strings.Split(strings.Trim(trataURL.Path, "/services/"), "/")) != 3 {
|
||||
log.Warnf("slack-hook-url is not properly formatted... no notification will be sent: unexpected number of / in URL\n")
|
||||
} else {
|
||||
notifyURL = fmt.Sprintf("slack://%s", strings.Trim(trataURL.Path, "/services/"))
|
||||
}
|
||||
}
|
||||
var preRebootNodeLabelKeys, postRebootNodeLabelKeys []string
|
||||
for _, label := range preRebootNodeLabels {
|
||||
preRebootNodeLabelKeys = append(preRebootNodeLabelKeys, strings.Split(label, "=")[0])
|
||||
}
|
||||
for _, label := range postRebootNodeLabels {
|
||||
postRebootNodeLabelKeys = append(postRebootNodeLabelKeys, strings.Split(label, "=")[0])
|
||||
}
|
||||
sort.Strings(preRebootNodeLabelKeys)
|
||||
sort.Strings(postRebootNodeLabelKeys)
|
||||
if !reflect.DeepEqual(preRebootNodeLabelKeys, postRebootNodeLabelKeys) {
|
||||
log.Warnf("pre-reboot-node-labels keys and post-reboot-node-labels keys do not match. This may result in unexpected behaviour.")
|
||||
}
|
||||
}
|
||||
|
||||
// stripQuotes removes one pair of matching literal single or double quote
// characters that surround the whole string. Strings without a matching
// surrounding pair are returned unchanged.
func stripQuotes(str string) string {
	// BUG FIX: the original guard was len(str) > 2, which excluded
	// two-character strings, so a bare quote pair ("" or '') was returned
	// unchanged instead of being stripped to the empty string — contradicting
	// the function's own comment that only length 0/1 inputs pass through.
	if len(str) >= 2 {
		firstChar := str[0]
		lastChar := str[len(str)-1]
		if firstChar == lastChar && (firstChar == '"' || firstChar == '\'') {
			return str[1 : len(str)-1]
		}
	}
	// return the original string if it has a length of zero or one,
	// or if the first and last characters are not a matching quote pair
	return str
}
|
||||
|
||||
// bindViper initializes viper and binds command flags with environment variables
// It runs as the Cobra PersistentPreRunE hook, so env vars are applied before
// flagCheck and root execute. It never returns a non-nil error.
func bindViper(cmd *cobra.Command, args []string) error {
	v := viper.New()

	// All bound variables share the KURED_ prefix (see flagToEnvVar).
	v.SetEnvPrefix(EnvPrefix)
	v.AutomaticEnv()
	bindFlags(cmd, v)

	return nil
}
|
||||
|
||||
// bindFlags binds each cobra flag to its associated viper configuration (environment variable)
// Explicit command-line flags win: a flag is only overwritten from the
// environment when it was not set on the command line (f.Changed is false).
func bindFlags(cmd *cobra.Command, v *viper.Viper) {
	cmd.Flags().VisitAll(func(f *pflag.Flag) {
		// Environment variables can't have dashes in them, so bind them to their equivalent keys with underscores
		if strings.Contains(f.Name, "-") {
			v.BindEnv(f.Name, flagToEnvVar(f.Name))
		}

		// Apply the viper config value to the flag when the flag is not set and viper has a value
		if !f.Changed && v.IsSet(f.Name) {
			val := v.Get(f.Name)
			log.Infof("Binding %s command flag to environment variable: %s", f.Name, flagToEnvVar(f.Name))
			// NOTE(review): errors from Set (e.g. an unparsable env value) are
			// discarded here — the flag silently keeps its default in that case.
			cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val))
		}
	})
}
|
||||
|
||||
// flagToEnvVar converts command flag name to equivalent environment variable name
|
||||
func flagToEnvVar(flag string) string {
|
||||
envVarSuffix := strings.ToUpper(strings.ReplaceAll(flag, "-", "_"))
|
||||
return fmt.Sprintf("%s_%s", EnvPrefix, envVarSuffix)
|
||||
}
|
||||
|
||||
// newCommand creates a new Command with stdout/stderr wired to our standard logger
// Command stdout is logged at Info level and stderr at Warn level, each entry
// tagged with the command name ("cmd") and the stream ("std") for filtering.
func newCommand(name string, arg ...string) *exec.Cmd {
	cmd := exec.Command(name, arg...)
	cmd.Stdout = log.NewEntry(log.StandardLogger()).
		WithField("cmd", cmd.Args[0]).
		WithField("std", "out").
		WriterLevel(log.InfoLevel)

	cmd.Stderr = log.NewEntry(log.StandardLogger()).
		WithField("cmd", cmd.Args[0]).
		WithField("std", "err").
		WriterLevel(log.WarnLevel)

	return cmd
}
|
||||
|
||||
// buildHostCommand writes a new command to run in the host namespace
|
||||
// Rancher based need different pid
|
||||
func buildHostCommand(pid int, command []string) []string {
|
||||
|
||||
// From the container, we nsenter into the proper PID to run the hostCommand.
|
||||
// For this, kured daemonset need to be configured with hostPID:true and privileged:true
|
||||
cmd := []string{"/usr/bin/nsenter", fmt.Sprintf("-m/proc/%d/ns/mnt", pid), "--"}
|
||||
cmd = append(cmd, command...)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// rebootRequired runs the sentinel command and reports whether a reboot is
// needed: a zero exit status means "reboot required". A non-zero exit status
// means "not required"; any other failure (e.g. command not found) is fatal.
func rebootRequired(sentinelCommand []string) bool {
	if err := newCommand(sentinelCommand[0], sentinelCommand[1:]...).Run(); err != nil {
		switch err := err.(type) {
		case *exec.ExitError:
			// We assume a non-zero exit code means 'reboot not required', but of course
			// the user could have misconfigured the sentinel command or something else
			// went wrong during its execution. In that case, not entering a reboot loop
			// is the right thing to do, and we are logging stdout/stderr of the command
			// so it should be obvious what is wrong.
			return false
		default:
			// Something was grossly misconfigured, such as the command path being wrong.
			log.Fatalf("Error invoking sentinel command: %v", err)
		}
	}
	return true
}
|
||||
|
||||
// RebootBlocker interface should be implemented by types
// to know if their instantiations should block a reboot
type RebootBlocker interface {
	// isBlocked reports whether this blocker currently vetoes a reboot.
	isBlocked() bool
}
|
||||
|
||||
// PrometheusBlockingChecker contains info for connecting
// to prometheus, and can give info about whether a reboot should be blocked
type PrometheusBlockingChecker struct {
	// prometheusClient to make prometheus-go-client and api config available
	// into the PrometheusBlockingChecker struct
	promClient *alerts.PromClient
	// regexp used to filter which alert names are ignored when querying
	filter *regexp.Regexp
	// bool to indicate if only firing alerts should be considered
	firingOnly bool
}
|
||||
|
||||
// KubernetesBlockingChecker contains info for connecting
// to k8s, and can give info about whether a reboot should be blocked
type KubernetesBlockingChecker struct {
	// client used to contact kubernetes API
	client *kubernetes.Clientset
	// nodename restricts the pod query to pods scheduled on this node
	nodename string
	// list of label selectors used to filter pods (from --blocking-pod-selector)
	filter []string
}
|
||||
|
||||
// isBlocked queries Prometheus for active alerts (filtered by pb.filter and
// pb.firingOnly) and blocks the reboot when any are found. A query error is
// treated conservatively as "blocked".
func (pb PrometheusBlockingChecker) isBlocked() bool {
	alertNames, err := pb.promClient.ActiveAlerts(pb.filter, pb.firingOnly)
	if err != nil {
		log.Warnf("Reboot blocked: prometheus query error: %v", err)
		return true
	}
	count := len(alertNames)
	// Truncate the logged list to the first 10 names to keep the log readable.
	if count > 10 {
		alertNames = append(alertNames[:10], "...")
	}
	if count > 0 {
		log.Warnf("Reboot blocked: %d active alerts: %v", count, alertNames)
		return true
	}
	return false
}
|
||||
|
||||
// isBlocked lists pods on this node matching any of the configured label
// selectors (across all namespaces) and blocks the reboot if any are found.
// Pods already Succeeded/Failed/Unknown are excluded. A query error is treated
// conservatively as "blocked".
func (kb KubernetesBlockingChecker) isBlocked() bool {
	fieldSelector := fmt.Sprintf("spec.nodeName=%s,status.phase!=Succeeded,status.phase!=Failed,status.phase!=Unknown", kb.nodename)
	for _, labelSelector := range kb.filter {
		// Limit:10 keeps the query cheap — we only need to know whether any
		// matching pod exists, plus a few names for the log message.
		podList, err := kb.client.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
			LabelSelector: labelSelector,
			FieldSelector: fieldSelector,
			Limit:         10})
		if err != nil {
			log.Warnf("Reboot blocked: pod query error: %v", err)
			return true
		}

		if len(podList.Items) > 0 {
			podNames := make([]string, 0, len(podList.Items))
			for _, pod := range podList.Items {
				podNames = append(podNames, pod.Name)
			}
			// A non-empty Continue token means more matches exist beyond the limit.
			if len(podList.Continue) > 0 {
				podNames = append(podNames, "...")
			}
			log.Warnf("Reboot blocked: matching pods: %v", podNames)
			return true
		}
	}
	return false
}
|
||||
|
||||
func rebootBlocked(blockers ...RebootBlocker) bool {
|
||||
for _, blocker := range blockers {
|
||||
if blocker.isBlocked() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// holding reports whether this node currently holds the daemonset lock,
// populating metadata from the lock annotation when it does. A lock-test
// error is fatal.
func holding(lock *daemonsetlock.DaemonSetLock, metadata interface{}) bool {
	holding, err := lock.Test(metadata)
	if err != nil {
		log.Fatalf("Error testing lock: %v", err)
	}
	if holding {
		log.Infof("Holding lock")
	}
	return holding
}
|
||||
|
||||
// acquire attempts to take the daemonset reboot lock with the given metadata
// and TTL. It returns true on success, false when another node already holds
// it; an acquisition error is fatal.
func acquire(lock *daemonsetlock.DaemonSetLock, metadata interface{}, TTL time.Duration) bool {
	holding, holder, err := lock.Acquire(metadata, TTL)
	switch {
	case err != nil:
		log.Fatalf("Error acquiring lock: %v", err)
		return false
	case !holding:
		log.Warnf("Lock already held: %v", holder)
		return false
	default:
		log.Infof("Acquired reboot lock")
		return true
	}
}
|
||||
|
||||
func throttle(releaseDelay time.Duration) {
|
||||
if releaseDelay > 0 {
|
||||
log.Infof("Delaying lock release by %v", releaseDelay)
|
||||
time.Sleep(releaseDelay)
|
||||
}
|
||||
}
|
||||
|
||||
// release gives up the daemonset reboot lock; a release error is fatal since
// a stuck lock would block reboots cluster-wide.
func release(lock *daemonsetlock.DaemonSetLock) {
	log.Infof("Releasing lock")
	if err := lock.Release(); err != nil {
		log.Fatalf("Error releasing lock: %v", err)
	}
}
|
||||
|
||||
// drain applies any configured pre-reboot labels, notifies the configured
// notify URL, then cordons and drains the node using the kubectl drain helper.
// It returns the first cordon/drain error so the caller can decide whether to
// force the reboot anyway.
func drain(client *kubernetes.Clientset, node *v1.Node) error {
	nodename := node.GetName()

	if preRebootNodeLabels != nil {
		updateNodeLabels(client, node, preRebootNodeLabels)
	}

	log.Infof("Draining node %s", nodename)

	// Best-effort notification: a send failure is logged but does not abort the drain.
	if notifyURL != "" {
		if err := shoutrrr.Send(notifyURL, fmt.Sprintf(messageTemplateDrain, nodename)); err != nil {
			log.Warnf("Error notifying: %v", err)
		}
	}

	drainer := &kubectldrain.Helper{
		Client:                          client,
		Ctx:                             context.Background(),
		GracePeriodSeconds:              drainGracePeriod,
		SkipWaitForDeleteTimeoutSeconds: skipWaitForDeleteTimeoutSeconds,
		// Force evicts pods not managed by a controller; daemonset pods are
		// ignored and emptyDir data is deleted, matching `kubectl drain` defaults
		// commonly used for node maintenance.
		Force:               true,
		DeleteEmptyDirData:  true,
		IgnoreAllDaemonSets: true,
		ErrOut:              os.Stderr,
		Out:                 os.Stdout,
		Timeout:             drainTimeout,
	}

	if err := kubectldrain.RunCordonOrUncordon(drainer, node, true); err != nil {
		log.Errorf("Error cordonning %s: %v", nodename, err)
		return err
	}

	if err := kubectldrain.RunNodeDrain(drainer, nodename); err != nil {
		log.Errorf("Error draining %s: %v", nodename, err)
		return err
	}
	return nil
}
|
||||
|
||||
func uncordon(client *kubernetes.Clientset, node *v1.Node) error {
|
||||
nodename := node.GetName()
|
||||
log.Infof("Uncordoning node %s", nodename)
|
||||
drainer := &kubectldrain.Helper{
|
||||
Client: client,
|
||||
ErrOut: os.Stderr,
|
||||
Out: os.Stdout,
|
||||
Ctx: context.Background(),
|
||||
}
|
||||
if err := kubectldrain.RunCordonOrUncordon(drainer, node, false); err != nil {
|
||||
log.Fatalf("Error uncordonning %s: %v", nodename, err)
|
||||
return err
|
||||
} else if postRebootNodeLabels != nil {
|
||||
updateNodeLabels(client, node, postRebootNodeLabels)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// invokeReboot notifies the configured notify URL (best-effort) and then runs
// the reboot command; a failure to launch the reboot command is fatal.
// rebootCommand is the already-nsenter-wrapped host command (see buildHostCommand).
func invokeReboot(nodeID string, rebootCommand []string) {
	log.Infof("Running command: %s for node: %s", rebootCommand, nodeID)

	if notifyURL != "" {
		if err := shoutrrr.Send(notifyURL, fmt.Sprintf(messageTemplateReboot, nodeID)); err != nil {
			log.Warnf("Error notifying: %v", err)
		}
	}

	if err := newCommand(rebootCommand[0], rebootCommand[1:]...).Run(); err != nil {
		log.Fatalf("Error invoking reboot command: %v", err)
	}
}
|
||||
|
||||
// maintainRebootRequiredMetric polls the sentinel command once a minute,
// forever, and mirrors the result into the kured_reboot_required gauge for
// this node. Intended to run in its own goroutine (see root()).
func maintainRebootRequiredMetric(nodeID string, sentinelCommand []string) {
	for {
		if rebootRequired(sentinelCommand) {
			rebootRequiredGauge.WithLabelValues(nodeID).Set(1)
		} else {
			rebootRequiredGauge.WithLabelValues(nodeID).Set(0)
		}
		time.Sleep(time.Minute)
	}
}
|
||||
|
||||
// nodeMeta is used to remember information across reboots
// It is serialized into the lock annotation so that, after the reboot, kured
// knows whether the node was already unschedulable before it cordoned it.
type nodeMeta struct {
	// Unschedulable records node.Spec.Unschedulable at lock-acquisition time.
	Unschedulable bool `json:"unschedulable"`
}
|
||||
|
||||
// addNodeAnnotations fetches the node, merges the given annotations into it,
// and applies them with a strategic-merge patch. Errors are logged and
// returned so the caller can retry on the next tick.
func addNodeAnnotations(client *kubernetes.Clientset, nodeID string, annotations map[string]string) error {
	node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeID, metav1.GetOptions{})
	if err != nil {
		log.Errorf("Error retrieving node object via k8s API: %s", err)
		return err
	}
	for k, v := range annotations {
		node.Annotations[k] = v
		log.Infof("Adding node %s annotation: %s=%s", node.GetName(), k, v)
	}

	// NOTE(review): the *entire* node object is marshalled as the patch body,
	// not just the annotations — a larger payload than strictly necessary for
	// a strategic merge; verify this is intentional.
	bytes, err := json.Marshal(node)
	if err != nil {
		log.Errorf("Error marshalling node object into JSON: %v", err)
		return err
	}

	_, err = client.CoreV1().Nodes().Patch(context.TODO(), node.GetName(), types.StrategicMergePatchType, bytes, metav1.PatchOptions{})
	if err != nil {
		var annotationsErr string
		for k, v := range annotations {
			annotationsErr += fmt.Sprintf("%s=%s ", k, v)
		}
		log.Errorf("Error adding node annotations %s via k8s API: %v", annotationsErr, err)
		return err
	}
	return nil
}
|
||||
|
||||
// deleteNodeAnnotation removes a single annotation from the node via a JSON
// Patch "remove" operation. Errors are logged and returned for retry.
func deleteNodeAnnotation(client *kubernetes.Clientset, nodeID, key string) error {
	log.Infof("Deleting node %s annotation %s", nodeID, key)

	// JSON Patch takes as path input a JSON Pointer, defined in RFC6901
	// So we replace all instances of "/" with "~1" as per:
	// https://tools.ietf.org/html/rfc6901#section-3
	patch := []byte(fmt.Sprintf("[{\"op\":\"remove\",\"path\":\"/metadata/annotations/%s\"}]", strings.ReplaceAll(key, "/", "~1")))
	_, err := client.CoreV1().Nodes().Patch(context.TODO(), nodeID, types.JSONPatchType, patch, metav1.PatchOptions{})
	if err != nil {
		log.Errorf("Error deleting node annotation %s via k8s API: %v", key, err)
		return err
	}
	return nil
}
|
||||
|
||||
func updateNodeLabels(client *kubernetes.Clientset, node *v1.Node, labels []string) {
|
||||
labelsMap := make(map[string]string)
|
||||
for _, label := range labels {
|
||||
k := strings.Split(label, "=")[0]
|
||||
v := strings.Split(label, "=")[1]
|
||||
labelsMap[k] = v
|
||||
log.Infof("Updating node %s label: %s=%s", node.GetName(), k, v)
|
||||
}
|
||||
|
||||
bytes, err := json.Marshal(map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"labels": labelsMap,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("Error marshalling node object into JSON: %v", err)
|
||||
}
|
||||
|
||||
_, err = client.CoreV1().Nodes().Patch(context.TODO(), node.GetName(), types.StrategicMergePatchType, bytes, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
var labelsErr string
|
||||
for _, label := range labels {
|
||||
k := strings.Split(label, "=")[0]
|
||||
v := strings.Split(label, "=")[1]
|
||||
labelsErr += fmt.Sprintf("%s=%s ", k, v)
|
||||
}
|
||||
log.Errorf("Error updating node labels %s via k8s API: %v", labelsErr, err)
|
||||
}
|
||||
}
|
||||
|
||||
// rebootAsRequired is the main reboot control loop, run as a goroutine.
// Phase 1 (startup/post-reboot): if this node still holds the lock from a
// previous run, uncordon it (unless it was already unschedulable before the
// reboot), clear the reboot-in-progress annotation, and release the lock.
// Phase 2 (steady state): every `period`, check the reboot window, sentinel
// and blockers; when a reboot is due, annotate, lock, drain and reboot.
func rebootAsRequired(nodeID string, rebootCommand []string, sentinelCommand []string, window *timewindow.TimeWindow, TTL time.Duration, releaseDelay time.Duration) {
	config, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}

	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	lock := daemonsetlock.New(client, nodeID, dsNamespace, dsName, lockAnnotation)

	nodeMeta := nodeMeta{}
	source := rand.NewSource(time.Now().UnixNano())
	tick := delaytick.New(source, 1*time.Minute)
	// Phase 1: runs at most until the lock is confirmed held or not; both
	// branches break out of the loop, the tick only paces uncordon retries.
	for range tick {
		if holding(lock, &nodeMeta) {
			node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeID, metav1.GetOptions{})
			if err != nil {
				log.Errorf("Error retrieving node object via k8s API: %v", err)
				continue
			}
			// Only uncordon if *we* cordoned it: nodeMeta remembers whether the
			// node was already unschedulable before kured touched it.
			if !nodeMeta.Unschedulable {
				err = uncordon(client, node)
				if err != nil {
					log.Errorf("Unable to uncordon %s: %v, will continue to hold lock and retry uncordon", node.GetName(), err)
					continue
				} else {
					if notifyURL != "" {
						if err := shoutrrr.Send(notifyURL, fmt.Sprintf(messageTemplateUncordon, nodeID)); err != nil {
							log.Warnf("Error notifying: %v", err)
						}
					}
				}
			}
			// If we're holding the lock we know we've tried, in a prior run, to reboot
			// So (1) we want to confirm that the reboot succeeded practically ( !rebootRequired() )
			// And (2) check if we previously annotated the node that it was in the process of being rebooted,
			// And finally (3) if it has that annotation, to delete it.
			// This indicates to other node tools running on the cluster that this node may be a candidate for maintenance
			if annotateNodes && !rebootRequired(sentinelCommand) {
				if _, ok := node.Annotations[KuredRebootInProgressAnnotation]; ok {
					err := deleteNodeAnnotation(client, nodeID, KuredRebootInProgressAnnotation)
					if err != nil {
						continue
					}
				}
			}
			throttle(releaseDelay)
			release(lock)
			break
		} else {
			break
		}
	}

	preferNoScheduleTaint := taints.New(client, nodeID, preferNoScheduleTaintName, v1.TaintEffectPreferNoSchedule)

	// Remove taint immediately during startup to quickly allow scheduling again.
	if !rebootRequired(sentinelCommand) {
		preferNoScheduleTaint.Disable()
	}

	// instantiate prometheus client
	promClient, err := alerts.NewPromClient(papi.Config{Address: prometheusURL})
	if err != nil {
		log.Fatal("Unable to create prometheus client: ", err)
	}

	// Phase 2: steady-state polling loop; delaytick jitters the first tick to
	// spread checks across nodes.
	source = rand.NewSource(time.Now().UnixNano())
	tick = delaytick.New(source, period)
	for range tick {
		if !window.Contains(time.Now()) {
			// Remove taint outside the reboot time window to allow for normal operation.
			preferNoScheduleTaint.Disable()
			continue
		}

		if !rebootRequired(sentinelCommand) {
			log.Infof("Reboot not required")
			preferNoScheduleTaint.Disable()
			continue
		}
		log.Infof("Reboot required")

		var blockCheckers []RebootBlocker
		if prometheusURL != "" {
			blockCheckers = append(blockCheckers, PrometheusBlockingChecker{promClient: promClient, filter: alertFilter, firingOnly: alertFiringOnly})
		}
		if podSelectors != nil {
			blockCheckers = append(blockCheckers, KubernetesBlockingChecker{client: client, nodename: nodeID, filter: podSelectors})
		}

		if rebootBlocked(blockCheckers...) {
			continue
		}

		node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeID, metav1.GetOptions{})
		if err != nil {
			log.Fatalf("Error retrieving node object via k8s API: %v", err)
		}
		// Remember the pre-reboot schedulability so Phase 1 of the next run
		// knows whether to uncordon.
		nodeMeta.Unschedulable = node.Spec.Unschedulable

		var timeNowString string
		if annotateNodes {
			if _, ok := node.Annotations[KuredRebootInProgressAnnotation]; !ok {
				timeNowString = time.Now().Format(time.RFC3339)
				// Annotate this node to indicate that "I am going to be rebooted!"
				// so that other node maintenance tools running on the cluster are aware that this node is in the process of a "state transition"
				annotations := map[string]string{KuredRebootInProgressAnnotation: timeNowString}
				// & annotate this node with a timestamp so that other node maintenance tools know how long it's been since this node has been marked for reboot
				annotations[KuredMostRecentRebootNeededAnnotation] = timeNowString
				err := addNodeAnnotations(client, nodeID, annotations)
				if err != nil {
					continue
				}
			}
		}

		if !holding(lock, &nodeMeta) && !acquire(lock, &nodeMeta, TTL) {
			// Prefer to not schedule pods onto this node to avoid draining the same pod multiple times.
			preferNoScheduleTaint.Enable()
			continue
		}

		err = drain(client, node)
		if err != nil {
			if !forceReboot {
				log.Errorf("Unable to cordon or drain %s: %v, will release lock and retry cordon and drain before rebooting when lock is next acquired", node.GetName(), err)
				release(lock)
				log.Infof("Performing a best-effort uncordon after failed cordon and drain")
				uncordon(client, node)
				continue
			}
		}

		if rebootDelay > 0 {
			log.Infof("Delaying reboot for %v", rebootDelay)
			time.Sleep(rebootDelay)
		}

		invokeReboot(nodeID, rebootCommand)
		// Spin until the host actually reboots us; the process ends with the node.
		for {
			log.Infof("Waiting for reboot")
			time.Sleep(time.Minute)
		}
	}
}
|
||||
|
||||
// buildSentinelCommand creates the shell command line which will need wrapping to escape
// the container boundaries
// An explicit --reboot-sentinel-command takes precedence; otherwise the check
// falls back to testing for the sentinel file's existence.
func buildSentinelCommand(rebootSentinelFile string, rebootSentinelCommand string) []string {
	if rebootSentinelCommand != "" {
		cmd, err := shlex.Split(rebootSentinelCommand)
		if err != nil {
			log.Fatalf("Error parsing provided sentinel command: %v", err)
		}
		return cmd
	}
	return []string{"test", "-f", rebootSentinelFile}
}
|
||||
|
||||
// parseRebootCommand creates the shell command line which will need wrapping to escape
// the container boundaries
// The flag value is tokenized shell-style (quotes respected); a parse error is fatal.
func parseRebootCommand(rebootCommand string) []string {
	command, err := shlex.Split(rebootCommand)
	if err != nil {
		log.Fatalf("Error parsing provided reboot command: %v", err)
	}
	return command
}
|
||||
|
||||
// root is the Cobra Run entry point: it validates configuration, logs the
// effective settings, starts the reboot loop and the metric-maintenance
// goroutines, and then serves Prometheus metrics on :8080 forever.
func root(cmd *cobra.Command, args []string) {
	if logFormat == "json" {
		log.SetFormatter(&log.JSONFormatter{})
	}

	log.Infof("Kubernetes Reboot Daemon: %s", version)

	if nodeID == "" {
		log.Fatal("KURED_NODE_ID environment variable required")
	}

	window, err := timewindow.New(rebootDays, rebootStart, rebootEnd, timezone)
	if err != nil {
		log.Fatalf("Failed to build time window: %v", err)
	}

	sentinelCommand := buildSentinelCommand(rebootSentinelFile, rebootSentinelCommand)
	restartCommand := parseRebootCommand(rebootCommand)

	// Log the effective configuration so operators can audit a running pod.
	log.Infof("Node ID: %s", nodeID)
	log.Infof("Lock Annotation: %s/%s:%s", dsNamespace, dsName, lockAnnotation)
	if lockTTL > 0 {
		log.Infof("Lock TTL set, lock will expire after: %v", lockTTL)
	} else {
		log.Info("Lock TTL not set, lock will remain until being released")
	}
	if lockReleaseDelay > 0 {
		log.Infof("Lock release delay set, lock release will be delayed by: %v", lockReleaseDelay)
	} else {
		log.Info("Lock release delay not set, lock will be released immediately after rebooting")
	}
	log.Infof("PreferNoSchedule taint: %s", preferNoScheduleTaintName)
	log.Infof("Blocking Pod Selectors: %v", podSelectors)
	log.Infof("Reboot schedule: %v", window)
	log.Infof("Reboot check command: %s every %v", sentinelCommand, period)
	log.Infof("Reboot command: %s", restartCommand)
	if annotateNodes {
		log.Infof("Will annotate nodes during kured reboot operations")
	}

	// To run those commands as it was the host, we'll use nsenter
	// Relies on hostPID:true and privileged:true to enter host mount space
	// PID set to 1, until we have a better discovery mechanism.
	hostSentinelCommand := buildHostCommand(1, sentinelCommand)
	hostRestartCommand := buildHostCommand(1, restartCommand)

	go rebootAsRequired(nodeID, hostRestartCommand, hostSentinelCommand, window, lockTTL, lockReleaseDelay)
	go maintainRebootRequiredMetric(nodeID, hostSentinelCommand)

	// Blocks forever; ListenAndServe only returns on error, which is fatal.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
||||
@@ -1,310 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/kubereboot/kured/pkg/alerts"
|
||||
assert "gotest.tools/v3/assert"
|
||||
|
||||
papi "github.com/prometheus/client_golang/api"
|
||||
)
|
||||
|
||||
// BlockingChecker is a test double for RebootBlocker that returns a canned answer.
type BlockingChecker struct {
	// blocking is the fixed value returned by isBlocked.
	blocking bool
}

// isBlocked returns the canned blocking flag.
func (fbc BlockingChecker) isBlocked() bool {
	return fbc.blocking
}

var _ RebootBlocker = BlockingChecker{}       // Verify that Type implements Interface.
var _ RebootBlocker = (*BlockingChecker)(nil) // Verify that *Type implements Interface.
|
||||
|
||||
// Test_flagCheck exercises flagCheck's URL normalization by mutating the
// package-level slackHookURL/notifyURL globals directly: Slack hook URLs must
// be rewritten to shoutrrr "slack://" form, and surrounding quotes stripped.
// NOTE(review): cmd and args are passed as zero values — flagCheck does not
// use them in these paths.
func Test_flagCheck(t *testing.T) {
	var cmd *cobra.Command
	var args []string
	// Deprecated --slack-hook-url is converted to a slack:// notify URL.
	slackHookURL = "https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
	expected := "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
	flagCheck(cmd, args)
	if notifyURL != expected {
		t.Errorf("Slack URL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
	}

	// validate that surrounding quotes are stripped
	slackHookURL = "\"https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET\""
	expected = "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
	flagCheck(cmd, args)
	if notifyURL != expected {
		t.Errorf("Slack URL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
	}
	slackHookURL = "'https://hooks.slack.com/services/BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET'"
	expected = "slack://BLABLABA12345/IAM931A0VERY/COMPLICATED711854TOKEN1SET"
	flagCheck(cmd, args)
	if notifyURL != expected {
		t.Errorf("Slack URL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
	}
	// With no slack hook set, --notify-url itself gets its quotes stripped.
	slackHookURL = ""
	notifyURL = "\"teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com\""
	expected = "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"
	flagCheck(cmd, args)
	if notifyURL != expected {
		t.Errorf("notifyURL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
	}
	notifyURL = "'teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com'"
	expected = "teams://79b4XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@acd8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/204cXXXXXXXXXXXXXXXXXXXXXXXXXXXX/a1f8XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX?host=XXXX.webhook.office.com"
	flagCheck(cmd, args)
	if notifyURL != expected {
		t.Errorf("notifyURL Parsing is wrong: expecting %s but got %s\n", expected, notifyURL)
	}
}
|
||||
|
||||
func Test_stripQuotes(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "string with no surrounding quotes is unchanged",
|
||||
input: "Hello, world!",
|
||||
expected: "Hello, world!",
|
||||
},
|
||||
{
|
||||
name: "string with surrounding double quotes should strip quotes",
|
||||
input: "\"Hello, world!\"",
|
||||
expected: "Hello, world!",
|
||||
},
|
||||
{
|
||||
name: "string with surrounding single quotes should strip quotes",
|
||||
input: "'Hello, world!'",
|
||||
expected: "Hello, world!",
|
||||
},
|
||||
{
|
||||
name: "string with unbalanced surrounding quotes is unchanged",
|
||||
input: "'Hello, world!\"",
|
||||
expected: "'Hello, world!\"",
|
||||
},
|
||||
{
|
||||
name: "string with length of one is unchanged",
|
||||
input: "'",
|
||||
expected: "'",
|
||||
},
|
||||
{
|
||||
name: "string with length of zero is unchanged",
|
||||
input: "",
|
||||
expected: "",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := stripQuotes(tt.input); !reflect.DeepEqual(got, tt.expected) {
|
||||
t.Errorf("stripQuotes() = %v, expected %v", got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_rebootBlocked(t *testing.T) {
|
||||
noCheckers := []RebootBlocker{}
|
||||
nonblockingChecker := BlockingChecker{blocking: false}
|
||||
blockingChecker := BlockingChecker{blocking: true}
|
||||
|
||||
// Instantiate a prometheusClient with a broken_url
|
||||
promClient, err := alerts.NewPromClient(papi.Config{Address: "broken_url"})
|
||||
if err != nil {
|
||||
log.Fatal("Can't create prometheusClient: ", err)
|
||||
}
|
||||
brokenPrometheusClient := PrometheusBlockingChecker{promClient: promClient, filter: nil, firingOnly: false}
|
||||
|
||||
type args struct {
|
||||
blockers []RebootBlocker
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Do not block on no blocker defined",
|
||||
args: args{blockers: noCheckers},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Ensure a blocker blocks",
|
||||
args: args{blockers: []RebootBlocker{blockingChecker}},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Ensure a non-blocker doesn't block",
|
||||
args: args{blockers: []RebootBlocker{nonblockingChecker}},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Ensure one blocker is enough to block",
|
||||
args: args{blockers: []RebootBlocker{nonblockingChecker, blockingChecker}},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Do block on error contacting prometheus API",
|
||||
args: args{blockers: []RebootBlocker{brokenPrometheusClient}},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := rebootBlocked(tt.args.blockers...); got != tt.want {
|
||||
t.Errorf("rebootBlocked() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_buildHostCommand(t *testing.T) {
|
||||
type args struct {
|
||||
pid int
|
||||
command []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "Ensure command will run with nsenter",
|
||||
args: args{pid: 1, command: []string{"ls", "-Fal"}},
|
||||
want: []string{"/usr/bin/nsenter", "-m/proc/1/ns/mnt", "--", "ls", "-Fal"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := buildHostCommand(tt.args.pid, tt.args.command); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("buildHostCommand() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_buildSentinelCommand(t *testing.T) {
|
||||
type args struct {
|
||||
rebootSentinelFile string
|
||||
rebootSentinelCommand string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "Ensure a sentinelFile generates a shell 'test' command with the right file",
|
||||
args: args{
|
||||
rebootSentinelFile: "/test1",
|
||||
rebootSentinelCommand: "",
|
||||
},
|
||||
want: []string{"test", "-f", "/test1"},
|
||||
},
|
||||
{
|
||||
name: "Ensure a sentinelCommand has priority over a sentinelFile if both are provided (because sentinelFile is always provided)",
|
||||
args: args{
|
||||
rebootSentinelFile: "/test1",
|
||||
rebootSentinelCommand: "/sbin/reboot-required -r",
|
||||
},
|
||||
want: []string{"/sbin/reboot-required", "-r"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := buildSentinelCommand(tt.args.rebootSentinelFile, tt.args.rebootSentinelCommand); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("buildSentinelCommand() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseRebootCommand(t *testing.T) {
|
||||
type args struct {
|
||||
rebootCommand string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "Ensure a reboot command is properly parsed",
|
||||
args: args{
|
||||
rebootCommand: "/sbin/systemctl reboot",
|
||||
},
|
||||
want: []string{"/sbin/systemctl", "reboot"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := parseRebootCommand(tt.args.rebootCommand); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("parseRebootCommand() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_rebootRequired(t *testing.T) {
|
||||
type args struct {
|
||||
sentinelCommand []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Ensure rc = 0 means reboot required",
|
||||
args: args{
|
||||
sentinelCommand: []string{"true"},
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Ensure rc != 0 means reboot NOT required",
|
||||
args: args{
|
||||
sentinelCommand: []string{"false"},
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := rebootRequired(tt.args.sentinelCommand); got != tt.want {
|
||||
t.Errorf("rebootRequired() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_rebootRequired_fatals(t *testing.T) {
|
||||
cases := []struct {
|
||||
param []string
|
||||
expectFatal bool
|
||||
}{
|
||||
{
|
||||
param: []string{"true"},
|
||||
expectFatal: false,
|
||||
},
|
||||
{
|
||||
param: []string{"./babar"},
|
||||
expectFatal: true,
|
||||
},
|
||||
}
|
||||
|
||||
defer func() { log.StandardLogger().ExitFunc = nil }()
|
||||
var fatal bool
|
||||
log.StandardLogger().ExitFunc = func(int) { fatal = true }
|
||||
|
||||
for _, c := range cases {
|
||||
fatal = false
|
||||
rebootRequired(c.param)
|
||||
assert.Equal(t, c.expectFatal, fatal)
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
)
|
||||
|
||||
type regexpValue struct {
|
||||
value **regexp.Regexp
|
||||
}
|
||||
|
||||
func (rev *regexpValue) String() string {
|
||||
if *rev.value == nil {
|
||||
return ""
|
||||
}
|
||||
return (*rev.value).String()
|
||||
}
|
||||
|
||||
func (rev *regexpValue) Set(s string) error {
|
||||
value, err := regexp.Compile(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*rev.value = value
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rev *regexpValue) Type() string {
|
||||
return "regexp.Regexp"
|
||||
}
|
||||
108
go.mod
108
go.mod
@@ -1,108 +0,0 @@
|
||||
module github.com/kubereboot/kured
|
||||
|
||||
go 1.18
|
||||
|
||||
replace (
|
||||
golang.org/x/net => golang.org/x/net v0.4.0
|
||||
golang.org/x/text => golang.org/x/text v0.3.8
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/containrrr/shoutrrr v0.7.1
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/prometheus/common v0.39.0
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.15.0
|
||||
github.com/stretchr/testify v1.8.1
|
||||
gotest.tools/v3 v3.4.0
|
||||
k8s.io/api v0.25.5
|
||||
k8s.io/apimachinery v0.25.5
|
||||
k8s.io/client-go v0.25.5
|
||||
k8s.io/kubectl v0.25.5
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/go-errors/errors v1.0.1 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.5 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/imdario/mergo v0.3.6 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.16 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/russross/blackfriday v1.5.2 // indirect
|
||||
github.com/spf13/afero v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/subosito/gotenv v1.4.2 // indirect
|
||||
github.com/xlab/treeprint v1.1.0 // indirect
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
|
||||
golang.org/x/net v0.4.0 // indirect
|
||||
golang.org/x/oauth2 v0.3.0 // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/term v0.3.0 // indirect
|
||||
golang.org/x/text v0.5.0 // indirect
|
||||
golang.org/x/time v0.1.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/cli-runtime v0.25.5 // indirect
|
||||
k8s.io/component-base v0.25.5 // indirect
|
||||
k8s.io/klog/v2 v2.70.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
|
||||
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.12.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.2.0 // indirect
|
||||
)
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 24 KiB |
688
index.yaml
Normal file
688
index.yaml
Normal file
@@ -0,0 +1,688 @@
|
||||
apiVersion: v1
|
||||
entries:
|
||||
kured:
|
||||
- apiVersion: v1
|
||||
appVersion: 1.10.2
|
||||
created: "2022-08-20T09:11:27.828539459Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 0d69719fdec1e5c264cb0b04a849aca7452a016e85ea1caca64d3b57b402c75c
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-3.0.1.tgz
|
||||
version: 3.0.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.10.1
|
||||
created: "2022-07-31T13:51:38.928629992Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 1b66b4183ca1d3ac66779cc5ff2e1276c2a2325c17875a85a19532e8a5022a10
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-3.0.0.tgz
|
||||
version: 3.0.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.10.1
|
||||
created: "2022-07-01T15:44:53.561402098Z"
|
||||
description: A Helm chart for kured
|
||||
digest: e2727a5db21ab73d8c57db5a2a3cd09793296408c0c494279f8e2afb5d52cf28
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.17.0.tgz
|
||||
version: 2.17.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.10.0
|
||||
created: "2022-06-29T12:50:23.453793995Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 1e047a20c633e226d7f77fc4e85b33a5547ce3e2b44525b680cb3d0b89350cbd
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.16.0.tgz
|
||||
version: 2.16.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.10.0
|
||||
created: "2022-06-08T17:32:33.101479721Z"
|
||||
description: A Helm chart for kured
|
||||
digest: e168f38de6d44da877509c099fcad738e5fcc3b99240ded34221c7bfa7ed5d0a
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.15.0.tgz
|
||||
version: 2.15.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.9.2
|
||||
created: "2022-05-25T04:51:50.346850231Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 48b267700a0d48ab73e4b6ace31c1c84c393959ed09c31a3ec03e170b6b4aacf
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.14.2.tgz
|
||||
version: 2.14.2
|
||||
- apiVersion: v1
|
||||
appVersion: 1.9.2
|
||||
created: "2022-05-12T06:57:59.679228473Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 345949c01aecbc73312a8dbdd2b7b553ca1a80fc24744a17c92b3c3c990f36a2
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.14.1.tgz
|
||||
version: 2.14.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.9.2
|
||||
created: "2022-05-06T19:42:06.720738587Z"
|
||||
description: A Helm chart for kured
|
||||
digest: cddb002491f4d32fb418dadc3cb846b12885fa6cb8c32d0968021c11bb3b2733
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.14.0.tgz
|
||||
version: 2.14.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.9.2
|
||||
created: "2022-04-02T15:26:54.467410377Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 76000a5c32552deab99bae1745fcb195f73f99bfcdb847a96cbcc4f833d4b641
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.13.0.tgz
|
||||
version: 2.13.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.9.2
|
||||
created: "2022-03-29T10:07:10.572530457Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 7635175d009834464b53f92184066a2e17dffe5a9c9f7965c32ffaada570326e
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.12.1.tgz
|
||||
version: 2.12.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.9.1
|
||||
created: "2022-03-16T10:49:00.591818431Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 5ef50be15401f068d6558e23f327333c960cd48b3d09431e56362f5da5aed84c
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.12.0.tgz
|
||||
version: 2.12.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.9.1
|
||||
created: "2022-01-12T06:25:36.587168836Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 9f2991549faa094ffb8324abeec649d39f9d2dd915e0287e11642411a47a4c26
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.11.2.tgz
|
||||
version: 2.11.2
|
||||
- apiVersion: v1
|
||||
appVersion: 1.9.1
|
||||
created: "2022-01-06T18:13:28.526458698Z"
|
||||
description: A Helm chart for kured
|
||||
digest: cb9884e9968426177a39d78b437d02046bd61b019cb8f3165624560ba24a9907
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.11.1.tgz
|
||||
version: 2.11.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.9.0
|
||||
created: "2021-12-17T13:15:05.508704637Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 125117291df9b58f7961de17d4d2d8d0b55267e2acc90ad76a2aab1fc9efea96
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.11.0.tgz
|
||||
version: 2.11.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.8.2
|
||||
created: "2021-12-06T14:04:27.615912334Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 0527e881055b974e869e86d6bda1a5ac1a86f305dbf7f9d7ba8cc082a24f1e32
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.10.2.tgz
|
||||
version: 2.10.2
|
||||
- apiVersion: v1
|
||||
appVersion: 1.8.1
|
||||
created: "2021-11-27T10:19:18.570439253Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 905576b23f8263dcf26da50da6c004cb266a143cca0567f0e5d5586569b8e367
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.10.1.tgz
|
||||
version: 2.10.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.8.0
|
||||
created: "2021-10-08T14:02:19.678658295Z"
|
||||
description: A Helm chart for kured
|
||||
digest: fff452ed6b03903cb4d5c2b7c865b7e199fc03f7ce6a5e9449115a1746c37f50
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.10.0.tgz
|
||||
version: 2.10.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.7.0
|
||||
created: "2021-09-15T16:46:01.039895438Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 02fd3ce98b427b411bf425cbdd60567072596f3c1ca44ff3ecb17f4852cd0099
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.9.1.tgz
|
||||
version: 2.9.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.7.0
|
||||
created: "2021-08-06T07:39:04.864672062Z"
|
||||
description: A Helm chart for kured
|
||||
digest: ee06afc5ba1af0591ac29f1be1425517a855959112d2fa7bc185df905f793d90
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.9.0.tgz
|
||||
version: 2.9.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.7.0
|
||||
created: "2021-07-26T11:19:41.659147727Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 68154ea2c074c0d331548b9e17f3c3246b283251eb1c5331eabb60dba168c1ed
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.8.0.tgz
|
||||
version: 2.8.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.7.0
|
||||
created: "2021-07-16T07:55:57.986831107Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 2607eabd4c1fd308e9825f30148ee67bc066660f800c92eeaffb7a9678c5451f
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.7.1.tgz
|
||||
version: 2.7.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.7.0
|
||||
created: "2021-06-17T16:14:33.768706163Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 85ab0f0d25a26a863bce43100dc3ad9584b6f11319ca6d320093ed33acf3bc6f
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.7.0.tgz
|
||||
version: 2.7.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.7.0
|
||||
created: "2021-05-20T11:56:16.670153606Z"
|
||||
description: A Helm chart for kured
|
||||
digest: b783d7acd1c19d3b12474a9e74d0bf396b5cb2c2b4984246cb1d1f8bc2c12d68
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.6.0.tgz
|
||||
version: 2.6.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.7.0
|
||||
created: "2021-05-19T17:10:18.386329817Z"
|
||||
description: A Helm chart for kured
|
||||
digest: d4815d495cc9476dcb6e8204e9a2791fac1f89f17a9136d3167d202be88f7000
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.5.0.tgz
|
||||
version: 2.5.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.6.1
|
||||
created: "2021-04-14T08:11:51.869402029Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 1961e0937676e0bcb8ceb7a4973c61450d059e2d4beea78481a9323cf0b964a6
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.4.3.tgz
|
||||
version: 2.4.3
|
||||
- apiVersion: v1
|
||||
appVersion: 1.6.1
|
||||
created: "2021-04-06T13:01:16.715078451Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 4f26e153bec10f32d120c9abb521262aba97d96fbb80b0e8829b41157b556c4b
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/main/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.4.2.tgz
|
||||
version: 2.4.2
|
||||
- apiVersion: v1
|
||||
appVersion: 1.6.1
|
||||
created: "2021-04-06T13:01:16.714161094Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 4788a1d33a938b6c17a760d6602eb03d68c86eb6be46c50272d9ebeeee3941ae
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.4.1.tgz
|
||||
version: 2.4.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.6.1
|
||||
created: "2021-04-06T13:01:16.713214735Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 5cb1837122133aa6022b56140fb04583f232b4199ed44fe3746a6240e9d116a2
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.4.0.tgz
|
||||
version: 2.4.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.6.1
|
||||
created: "2021-04-06T13:01:16.712224972Z"
|
||||
description: A Helm chart for kured
|
||||
digest: d6eed3eac12ea285716e46f8de0fc101692fc1827d6a56780976ef8f0c4d1cce
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.3.2.tgz
|
||||
version: 2.3.2
|
||||
- apiVersion: v1
|
||||
appVersion: 1.6.1
|
||||
created: "2021-04-06T13:01:16.711557431Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 84a75e3967d13440e3a856ecfc5a2a845ce19089a8b8b8da30d3e6344d1f3c3b
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.3.1.tgz
|
||||
version: 2.3.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.6.1
|
||||
created: "2021-04-06T13:01:16.710894489Z"
|
||||
description: A Helm chart for kured
|
||||
digest: db5f718db2a38cc4c46b5afb41fbc4cb82ac5298388008589bb1fc321d233ca3
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.3.0.tgz
|
||||
version: 2.3.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.5.1
|
||||
created: "2021-04-06T13:01:16.709668812Z"
|
||||
description: A Helm chart for kured
|
||||
digest: b3a8b13a79efa56a0a94fa91976faa4916fbdab826d9f50ddf63f4d9179a36e4
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.2.4.tgz
|
||||
version: 2.2.4
|
||||
- apiVersion: v1
|
||||
appVersion: 1.5.1
|
||||
created: "2021-04-06T13:01:16.70899537Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 47d881f78ce887567dd3513c5bf0a1c4532c34e05cd9697cc602ce9e461fd10a
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.2.1.tgz
|
||||
version: 2.2.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.5.0
|
||||
created: "2021-04-06T13:01:16.708325128Z"
|
||||
description: A Helm chart for kured
|
||||
digest: f1d8d83d9992346275d8ed5b4cdb84164cbeaada73b1ff11d802f0d7a38c1621
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
- email: david@davidkarlsen.com
|
||||
name: davidkarlsen
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.2.0.tgz
|
||||
version: 2.2.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.4.5
|
||||
created: "2021-04-06T13:01:16.707676487Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 5c63a1bf4aff4394afb703f44d6f20bcb0d9f79af4a89b7a1476148e5f8b0fd5
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: daniel@weave.works
|
||||
name: dholbach
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.1.1.tgz
|
||||
version: 2.1.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.4.4
|
||||
created: "2021-04-06T13:01:16.707031347Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 8ae0a2884d185ac6311d9333ba7b29c8815a2b433892bc073922c9ad5c0771bc
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: daniel@weave.works
|
||||
name: dholbach
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.0.3.tgz
|
||||
version: 2.0.3
|
||||
- apiVersion: v1
|
||||
appVersion: 1.4.3
|
||||
created: "2021-04-06T13:01:16.706360205Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 6b8057d3f8f5774ae75a57e38e63fe73ac7230871082177bd219543e03bc3981
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: daniel@weave.works
|
||||
name: dholbach
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.0.1.tgz
|
||||
version: 2.0.1
|
||||
- apiVersion: v1
|
||||
appVersion: 1.4.2
|
||||
created: "2021-04-06T13:01:16.705726665Z"
|
||||
description: A Helm chart for kured
|
||||
digest: 3a97561f4b5ad420a9e73ca88bcfdc29f25d722195614fc797b770ff053df672
|
||||
home: https://github.com/weaveworks/kured
|
||||
icon: https://raw.githubusercontent.com/weaveworks/kured/master/img/logo.png
|
||||
maintainers:
|
||||
- email: daniel@weave.works
|
||||
name: dholbach
|
||||
- email: christian.kotzbauer@gmail.com
|
||||
name: ckotzbauer
|
||||
name: kured
|
||||
sources:
|
||||
- https://github.com/weaveworks/kured
|
||||
urls:
|
||||
- https://weaveworks.github.io/kured/kured-2.0.0.tgz
|
||||
version: 2.0.0
|
||||
generated: "2022-08-20T09:11:27.825605805Z"
|
||||
BIN
kured-2.0.0.tgz
Normal file
BIN
kured-2.0.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.0.1.tgz
Normal file
BIN
kured-2.0.1.tgz
Normal file
Binary file not shown.
BIN
kured-2.0.3.tgz
Normal file
BIN
kured-2.0.3.tgz
Normal file
Binary file not shown.
BIN
kured-2.1.1.tgz
Normal file
BIN
kured-2.1.1.tgz
Normal file
Binary file not shown.
BIN
kured-2.10.0.tgz
Normal file
BIN
kured-2.10.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.10.1.tgz
Normal file
BIN
kured-2.10.1.tgz
Normal file
Binary file not shown.
BIN
kured-2.10.2.tgz
Normal file
BIN
kured-2.10.2.tgz
Normal file
Binary file not shown.
BIN
kured-2.11.0.tgz
Normal file
BIN
kured-2.11.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.11.1.tgz
Normal file
BIN
kured-2.11.1.tgz
Normal file
Binary file not shown.
BIN
kured-2.11.2.tgz
Normal file
BIN
kured-2.11.2.tgz
Normal file
Binary file not shown.
BIN
kured-2.12.0.tgz
Normal file
BIN
kured-2.12.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.12.1.tgz
Normal file
BIN
kured-2.12.1.tgz
Normal file
Binary file not shown.
BIN
kured-2.13.0.tgz
Normal file
BIN
kured-2.13.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.14.0.tgz
Normal file
BIN
kured-2.14.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.14.1.tgz
Normal file
BIN
kured-2.14.1.tgz
Normal file
Binary file not shown.
BIN
kured-2.14.2.tgz
Normal file
BIN
kured-2.14.2.tgz
Normal file
Binary file not shown.
BIN
kured-2.15.0.tgz
Normal file
BIN
kured-2.15.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.16.0.tgz
Normal file
BIN
kured-2.16.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.17.0.tgz
Normal file
BIN
kured-2.17.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.2.0.tgz
Normal file
BIN
kured-2.2.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.2.1.tgz
Normal file
BIN
kured-2.2.1.tgz
Normal file
Binary file not shown.
BIN
kured-2.2.4.tgz
Normal file
BIN
kured-2.2.4.tgz
Normal file
Binary file not shown.
BIN
kured-2.3.0.tgz
Normal file
BIN
kured-2.3.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.3.1.tgz
Normal file
BIN
kured-2.3.1.tgz
Normal file
Binary file not shown.
BIN
kured-2.3.2.tgz
Normal file
BIN
kured-2.3.2.tgz
Normal file
Binary file not shown.
BIN
kured-2.4.0.tgz
Normal file
BIN
kured-2.4.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.4.1.tgz
Normal file
BIN
kured-2.4.1.tgz
Normal file
Binary file not shown.
BIN
kured-2.4.2.tgz
Normal file
BIN
kured-2.4.2.tgz
Normal file
Binary file not shown.
BIN
kured-2.4.3.tgz
Normal file
BIN
kured-2.4.3.tgz
Normal file
Binary file not shown.
BIN
kured-2.5.0.tgz
Normal file
BIN
kured-2.5.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.6.0.tgz
Normal file
BIN
kured-2.6.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.7.0.tgz
Normal file
BIN
kured-2.7.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.7.1.tgz
Normal file
BIN
kured-2.7.1.tgz
Normal file
Binary file not shown.
BIN
kured-2.8.0.tgz
Normal file
BIN
kured-2.8.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.9.0.tgz
Normal file
BIN
kured-2.9.0.tgz
Normal file
Binary file not shown.
BIN
kured-2.9.1.tgz
Normal file
BIN
kured-2.9.1.tgz
Normal file
Binary file not shown.
BIN
kured-3.0.0.tgz
Normal file
BIN
kured-3.0.0.tgz
Normal file
Binary file not shown.
BIN
kured-3.0.1.tgz
Normal file
BIN
kured-3.0.1.tgz
Normal file
Binary file not shown.
@@ -1,81 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kured
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: kured # Must match `--ds-name`
|
||||
namespace: kube-system # Must match `--ds-namespace`
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
name: kured
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: kured
|
||||
spec:
|
||||
serviceAccountName: kured
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
hostPID: true # Facilitate entering the host mount namespace via init
|
||||
restartPolicy: Always
|
||||
containers:
|
||||
- name: kured
|
||||
image: ghcr.io/kubereboot/kured:1.12.1
|
||||
# If you find yourself here wondering why there is no
|
||||
# :latest tag on Docker Hub,see the FAQ in the README
|
||||
imagePullPolicy: IfNotPresent
|
||||
securityContext:
|
||||
privileged: true # Give permission to nsenter /proc/1/ns/mnt
|
||||
env:
|
||||
# Pass in the name of the node on which this pod is scheduled
|
||||
# for use with drain/uncordon operations and lock acquisition
|
||||
- name: KURED_NODE_ID
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
command:
|
||||
- /usr/bin/kured
|
||||
# - --force-reboot=false
|
||||
# - --drain-grace-period=-1
|
||||
# - --skip-wait-for-delete-timeout=0
|
||||
# - --drain-timeout=0
|
||||
# - --period=1h
|
||||
# - --ds-namespace=kube-system
|
||||
# - --ds-name=kured
|
||||
# - --lock-annotation=weave.works/kured-node-lock
|
||||
# - --lock-ttl=0
|
||||
# - --prometheus-url=http://prometheus.monitoring.svc.cluster.local
|
||||
# - --alert-filter-regexp=^RebootRequired$
|
||||
# - --alert-firing-only=false
|
||||
# - --reboot-sentinel=/var/run/reboot-required
|
||||
# - --prefer-no-schedule-taint=""
|
||||
# - --reboot-sentinel-command=""
|
||||
# - --slack-hook-url=https://hooks.slack.com/...
|
||||
# - --slack-username=prod
|
||||
# - --slack-channel=alerting
|
||||
# - --notify-url="" # See also shoutrrr url format
|
||||
# - --message-template-drain=Draining node %s
|
||||
# - --message-template-reboot=Rebooting node %s
|
||||
# - --message-template-uncordon=Node %s rebooted & uncordoned successfully!
|
||||
# - --blocking-pod-selector=runtime=long,cost=expensive
|
||||
# - --blocking-pod-selector=name=temperamental
|
||||
# - --blocking-pod-selector=...
|
||||
# - --reboot-days=sun,mon,tue,wed,thu,fri,sat
|
||||
# - --reboot-delay=90s
|
||||
# - --start-time=0:00
|
||||
# - --end-time=23:59:59
|
||||
# - --time-zone=UTC
|
||||
# - --annotate-nodes=false
|
||||
# - --lock-release-delay=30m
|
||||
# - --log-format=text
|
||||
@@ -1,63 +0,0 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: kured
|
||||
rules:
|
||||
# Allow kured to read spec.unschedulable
|
||||
# Allow kubectl to drain/uncordon
|
||||
#
|
||||
# NB: These permissions are tightly coupled to the bundled version of kubectl; the ones below
|
||||
# match https://github.com/kubernetes/kubernetes/blob/v1.19.4/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go
|
||||
#
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["list","delete","get"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["daemonsets"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/eviction"]
|
||||
verbs: ["create"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kured
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kured
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kured
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
namespace: kube-system
|
||||
name: kured
|
||||
rules:
|
||||
# Allow kured to lock/unlock itself
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["daemonsets"]
|
||||
resourceNames: ["kured"]
|
||||
verbs: ["update"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
namespace: kube-system
|
||||
name: kured
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
namespace: kube-system
|
||||
name: kured
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kured
|
||||
@@ -1,69 +0,0 @@
|
||||
package alerts
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
papi "github.com/prometheus/client_golang/api"
|
||||
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// PromClient is a wrapper around the Prometheus Client interface and implements the api
|
||||
// This way, the PromClient can be instantiated with the configuration the Client needs, and
|
||||
// the ability to use the methods the api has, like Query and so on.
|
||||
type PromClient struct {
|
||||
papi papi.Client
|
||||
api v1.API
|
||||
}
|
||||
|
||||
// NewPromClient creates a new client to the Prometheus API.
|
||||
// It returns an error on any problem.
|
||||
func NewPromClient(conf papi.Config) (*PromClient, error) {
|
||||
promClient, err := papi.NewClient(conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := PromClient{papi: promClient, api: v1.NewAPI(promClient)}
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
// ActiveAlerts is a method of type PromClient, it returns a list of names of active alerts
|
||||
// (e.g. pending or firing), filtered by the supplied regexp or by the includeLabels query.
|
||||
// filter by regexp means when the regex finds the alert-name; the alert is exluded from the
|
||||
// block-list and will NOT block rebooting. query by includeLabel means,
|
||||
// if the query finds an alert, it will include it to the block-list and it WILL block rebooting.
|
||||
func (p *PromClient) ActiveAlerts(filter *regexp.Regexp, firingOnly bool) ([]string, error) {
|
||||
|
||||
// get all alerts from prometheus
|
||||
value, _, err := p.api.Query(context.Background(), "ALERTS", time.Now())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if value.Type() == model.ValVector {
|
||||
if vector, ok := value.(model.Vector); ok {
|
||||
activeAlertSet := make(map[string]bool)
|
||||
for _, sample := range vector {
|
||||
if alertName, isAlert := sample.Metric[model.AlertNameLabel]; isAlert && sample.Value != 0 {
|
||||
if (filter == nil || !filter.MatchString(string(alertName))) && (!firingOnly || sample.Metric["alertstate"] == "firing") {
|
||||
activeAlertSet[string(alertName)] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var activeAlerts []string
|
||||
for activeAlert := range activeAlertSet {
|
||||
activeAlerts = append(activeAlerts, activeAlert)
|
||||
}
|
||||
sort.Strings(activeAlerts)
|
||||
|
||||
return activeAlerts, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Unexpected value type: %v", value)
|
||||
}
|
||||
@@ -1,141 +0,0 @@
|
||||
package alerts
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/client_golang/api"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type MockResponse struct {
|
||||
StatusCode int
|
||||
Body []byte
|
||||
}
|
||||
|
||||
// MockServerProperties ties a mock response to a url and a method
|
||||
type MockServerProperties struct {
|
||||
URI string
|
||||
HTTPMethod string
|
||||
Response MockResponse
|
||||
}
|
||||
|
||||
// NewMockServer sets up a new MockServer with properties ad starts the server.
|
||||
func NewMockServer(props ...MockServerProperties) *httptest.Server {
|
||||
|
||||
handler := http.HandlerFunc(
|
||||
func(w http.ResponseWriter, r *http.Request) {
|
||||
for _, proc := range props {
|
||||
_, err := w.Write(proc.Response.Body)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
return httptest.NewServer(handler)
|
||||
}
|
||||
|
||||
func TestActiveAlerts(t *testing.T) {
|
||||
responsebody := `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"ALERTS","alertname":"GatekeeperViolations","alertstate":"firing","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"PodCrashing-dev","alertstate":"firing","container":"deployment","instance":"1.2.3.4:8080","job":"kube-state-metrics","namespace":"dev","pod":"dev-deployment-78dcbmf25v","severity":"critical","team":"dev"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"PodRestart-dev","alertstate":"firing","container":"deployment","instance":"1.2.3.4:1234","job":"kube-state-metrics","namespace":"qa","pod":"qa-job-deployment-78dcbmf25v","severity":"warning","team":"qa"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"PrometheusTargetDown","alertstate":"firing","job":"kubernetes-pods","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]},{"metric":{"__name__":"ALERTS","alertname":"ScheduledRebootFailing","alertstate":"pending","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]}]}}`
|
||||
addr := "http://localhost:10001"
|
||||
|
||||
for _, tc := range []struct {
|
||||
it string
|
||||
rFilter string
|
||||
respBody string
|
||||
aName string
|
||||
wantN int
|
||||
firingOnly bool
|
||||
}{
|
||||
{
|
||||
it: "should return no active alerts",
|
||||
respBody: responsebody,
|
||||
rFilter: "",
|
||||
wantN: 0,
|
||||
firingOnly: false,
|
||||
},
|
||||
{
|
||||
it: "should return a subset of all alerts",
|
||||
respBody: responsebody,
|
||||
rFilter: "Pod",
|
||||
wantN: 3,
|
||||
firingOnly: false,
|
||||
},
|
||||
{
|
||||
it: "should return all active alerts by regex",
|
||||
respBody: responsebody,
|
||||
rFilter: "*",
|
||||
wantN: 5,
|
||||
firingOnly: false,
|
||||
},
|
||||
{
|
||||
it: "should return all active alerts by regex filter",
|
||||
respBody: responsebody,
|
||||
rFilter: "*",
|
||||
wantN: 5,
|
||||
firingOnly: false,
|
||||
},
|
||||
{
|
||||
it: "should return only firing alerts if firingOnly is true",
|
||||
respBody: responsebody,
|
||||
rFilter: "*",
|
||||
wantN: 4,
|
||||
firingOnly: true,
|
||||
},
|
||||
{
|
||||
it: "should return ScheduledRebootFailing active alerts",
|
||||
respBody: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"ALERTS","alertname":"ScheduledRebootFailing","alertstate":"pending","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]}]}}`,
|
||||
aName: "ScheduledRebootFailing",
|
||||
rFilter: "*",
|
||||
wantN: 1,
|
||||
firingOnly: false,
|
||||
},
|
||||
{
|
||||
it: "should not return an active alert if RebootRequired is firing (regex filter)",
|
||||
respBody: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"ALERTS","alertname":"RebootRequired","alertstate":"pending","severity":"warning","team":"platform-infra"},"value":[1622472933.973,"1"]}]}}`,
|
||||
rFilter: "RebootRequired",
|
||||
wantN: 0,
|
||||
firingOnly: false,
|
||||
},
|
||||
} {
|
||||
// Start mockServer
|
||||
mockServer := NewMockServer(MockServerProperties{
|
||||
URI: addr,
|
||||
HTTPMethod: http.MethodPost,
|
||||
Response: MockResponse{
|
||||
Body: []byte(tc.respBody),
|
||||
},
|
||||
})
|
||||
// Close mockServer after all connections are gone
|
||||
defer mockServer.Close()
|
||||
|
||||
t.Run(tc.it, func(t *testing.T) {
|
||||
|
||||
// regex filter
|
||||
regex, _ := regexp.Compile(tc.rFilter)
|
||||
|
||||
// instantiate the prometheus client with the mockserver-address
|
||||
p, err := NewPromClient(api.Config{Address: mockServer.URL})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
result, err := p.ActiveAlerts(regex, tc.firingOnly)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// assert
|
||||
assert.Equal(t, tc.wantN, len(result), "expected amount of alerts %v, got %v", tc.wantN, len(result))
|
||||
|
||||
if tc.aName != "" {
|
||||
assert.Equal(t, tc.aName, result[0], "expected active alert %v, got %v", tc.aName, result[0])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,170 +0,0 @@
|
||||
package daemonsetlock
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
const (
|
||||
k8sAPICallRetrySleep = 5 * time.Second // How much time to wait in between retrying a k8s API call
|
||||
k8sAPICallRetryTimeout = 5 * time.Minute // How long to wait until we determine that the k8s API is definitively unavailable
|
||||
)
|
||||
|
||||
// DaemonSetLock holds all necessary information to do actions
|
||||
// on the kured ds which holds lock info through annotations.
|
||||
type DaemonSetLock struct {
|
||||
client *kubernetes.Clientset
|
||||
nodeID string
|
||||
namespace string
|
||||
name string
|
||||
annotation string
|
||||
}
|
||||
|
||||
type lockAnnotationValue struct {
|
||||
NodeID string `json:"nodeID"`
|
||||
Metadata interface{} `json:"metadata,omitempty"`
|
||||
Created time.Time `json:"created"`
|
||||
TTL time.Duration `json:"TTL"`
|
||||
}
|
||||
|
||||
// New creates a daemonsetLock object containing the necessary data for follow up k8s requests
|
||||
func New(client *kubernetes.Clientset, nodeID, namespace, name, annotation string) *DaemonSetLock {
|
||||
return &DaemonSetLock{client, nodeID, namespace, name, annotation}
|
||||
}
|
||||
|
||||
// Acquire attempts to annotate the kured daemonset with lock info from instantiated DaemonSetLock using client-go
|
||||
func (dsl *DaemonSetLock) Acquire(metadata interface{}, TTL time.Duration) (bool, string, error) {
|
||||
for {
|
||||
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
|
||||
if err != nil {
|
||||
return false, "", fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
}
|
||||
|
||||
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
|
||||
if exists {
|
||||
value := lockAnnotationValue{}
|
||||
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
|
||||
if !ttlExpired(value.Created, value.TTL) {
|
||||
return value.NodeID == dsl.nodeID, value.NodeID, nil
|
||||
}
|
||||
}
|
||||
|
||||
if ds.ObjectMeta.Annotations == nil {
|
||||
ds.ObjectMeta.Annotations = make(map[string]string)
|
||||
}
|
||||
value := lockAnnotationValue{NodeID: dsl.nodeID, Metadata: metadata, Created: time.Now().UTC(), TTL: TTL}
|
||||
valueBytes, err := json.Marshal(&value)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
ds.ObjectMeta.Annotations[dsl.annotation] = string(valueBytes)
|
||||
|
||||
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
if se, ok := err.(*errors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
|
||||
// Something else updated the resource between us reading and writing - try again soon
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
} else {
|
||||
return false, "", err
|
||||
}
|
||||
}
|
||||
return true, dsl.nodeID, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Test attempts to check the kured daemonset lock status (existence, expiry) from instantiated DaemonSetLock using client-go
|
||||
func (dsl *DaemonSetLock) Test(metadata interface{}) (bool, error) {
|
||||
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
}
|
||||
|
||||
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
|
||||
if exists {
|
||||
value := lockAnnotationValue{Metadata: metadata}
|
||||
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !ttlExpired(value.Created, value.TTL) {
|
||||
return value.NodeID == dsl.nodeID, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Release attempts to remove the lock data from the kured ds annotations using client-go
|
||||
func (dsl *DaemonSetLock) Release() error {
|
||||
for {
|
||||
ds, err := dsl.GetDaemonSet(k8sAPICallRetrySleep, k8sAPICallRetryTimeout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("timed out trying to get daemonset %s in namespace %s: %w", dsl.name, dsl.namespace, err)
|
||||
}
|
||||
|
||||
valueString, exists := ds.ObjectMeta.Annotations[dsl.annotation]
|
||||
if exists {
|
||||
value := lockAnnotationValue{}
|
||||
if err := json.Unmarshal([]byte(valueString), &value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if value.NodeID != dsl.nodeID {
|
||||
return fmt.Errorf("Not lock holder: %v", value.NodeID)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("Lock not held")
|
||||
}
|
||||
|
||||
delete(ds.ObjectMeta.Annotations, dsl.annotation)
|
||||
|
||||
_, err = dsl.client.AppsV1().DaemonSets(dsl.namespace).Update(context.TODO(), ds, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
if se, ok := err.(*errors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
|
||||
// Something else updated the resource between us reading and writing - try again soon
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// GetDaemonSet returns the named DaemonSet resource from the DaemonSetLock's configured client
|
||||
func (dsl *DaemonSetLock) GetDaemonSet(sleep, timeout time.Duration) (*v1.DaemonSet, error) {
|
||||
var ds *v1.DaemonSet
|
||||
var lastError error
|
||||
err := wait.PollImmediate(sleep, timeout, func() (bool, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
if ds, lastError = dsl.client.AppsV1().DaemonSets(dsl.namespace).Get(ctx, dsl.name, metav1.GetOptions{}); lastError != nil {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Timed out trying to get daemonset %s in namespace %s: %v", dsl.name, dsl.namespace, lastError)
|
||||
}
|
||||
return ds, nil
|
||||
}
|
||||
|
||||
func ttlExpired(created time.Time, ttl time.Duration) bool {
|
||||
if ttl > 0 && time.Since(created) >= ttl {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
package daemonsetlock
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestTtlExpired(t *testing.T) {
|
||||
d := time.Date(2020, 05, 05, 14, 15, 0, 0, time.UTC)
|
||||
second, _ := time.ParseDuration("1s")
|
||||
zero, _ := time.ParseDuration("0m")
|
||||
|
||||
tests := []struct {
|
||||
created time.Time
|
||||
ttl time.Duration
|
||||
result bool
|
||||
}{
|
||||
{d, second, true},
|
||||
{time.Now(), second, false},
|
||||
{d, zero, false},
|
||||
}
|
||||
|
||||
for i, tst := range tests {
|
||||
if ttlExpired(tst.created, tst.ttl) != tst.result {
|
||||
t.Errorf("Test %d failed, expected %v but got %v", i, tst.result, !tst.result)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
package delaytick
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
// New ticks regularly after an initial delay randomly distributed between d/2 and d + d/2
func New(s rand.Source, d time.Duration) <-chan time.Time {
	out := make(chan time.Time)

	go func() {
		rng := rand.New(s)
		// Initial jitter: d/2 + U[0,1)*d lies in [d/2, 3d/2).
		initialDelay := time.Duration(float64(d)/2 + float64(d)*rng.Float64())
		time.Sleep(initialDelay)
		out <- time.Now()
		// After the first tick, emit at a fixed period of d.
		for tick := range time.Tick(d) {
			out <- tick
		}
	}()

	return out
}
|
||||
@@ -1,166 +0,0 @@
|
||||
package taints
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
// Taint allows to set soft and hard limitations for scheduling and executing pods on nodes.
type Taint struct {
	client    *kubernetes.Clientset // API client used to read and patch the node
	nodeID    string                // name of the node this taint applies to
	taintName string                // taint key; an empty name makes Enable/Disable no-ops
	effect    v1.TaintEffect        // taint effect applied alongside the key
	exists    bool                  // cached presence of the taint on the node
}
|
||||
|
||||
// New provides a new taint.
|
||||
func New(client *kubernetes.Clientset, nodeID, taintName string, effect v1.TaintEffect) *Taint {
|
||||
exists, _, _ := taintExists(client, nodeID, taintName)
|
||||
|
||||
return &Taint{
|
||||
client: client,
|
||||
nodeID: nodeID,
|
||||
taintName: taintName,
|
||||
effect: effect,
|
||||
exists: exists,
|
||||
}
|
||||
}
|
||||
|
||||
// Enable creates the taint for a node. Creating an existing taint is a noop.
|
||||
func (t *Taint) Enable() {
|
||||
if t.taintName == "" {
|
||||
return
|
||||
}
|
||||
|
||||
if t.exists {
|
||||
return
|
||||
}
|
||||
|
||||
preferNoSchedule(t.client, t.nodeID, t.taintName, t.effect, true)
|
||||
|
||||
t.exists = true
|
||||
}
|
||||
|
||||
// Disable removes the taint for a node. Removing a missing taint is a noop.
|
||||
func (t *Taint) Disable() {
|
||||
if t.taintName == "" {
|
||||
return
|
||||
}
|
||||
|
||||
if !t.exists {
|
||||
return
|
||||
}
|
||||
|
||||
preferNoSchedule(t.client, t.nodeID, t.taintName, t.effect, false)
|
||||
|
||||
t.exists = false
|
||||
}
|
||||
|
||||
func taintExists(client *kubernetes.Clientset, nodeID, taintName string) (bool, int, *v1.Node) {
|
||||
updatedNode, err := client.CoreV1().Nodes().Get(context.TODO(), nodeID, metav1.GetOptions{})
|
||||
if err != nil || updatedNode == nil {
|
||||
log.Fatalf("Error reading node %s: %v", nodeID, err)
|
||||
}
|
||||
|
||||
for i, taint := range updatedNode.Spec.Taints {
|
||||
if taint.Key == taintName {
|
||||
return true, i, updatedNode
|
||||
}
|
||||
}
|
||||
|
||||
return false, 0, updatedNode
|
||||
}
|
||||
|
||||
// preferNoSchedule reconciles the named taint on a node via a JSON patch:
// shouldExists=true adds the taint, shouldExists=false removes it. If the
// node is already in the desired state, nothing is done. Encoding or patch
// failures are fatal.
func preferNoSchedule(client *kubernetes.Clientset, nodeID, taintName string, effect v1.TaintEffect, shouldExists bool) {
	taintExists, offset, updatedNode := taintExists(client, nodeID, taintName)

	// Already in the desired state: nothing to do.
	if taintExists && shouldExists {
		log.Debugf("Taint %v exists already for node %v.", taintName, nodeID)
		return
	}

	if !taintExists && !shouldExists {
		log.Debugf("Taint %v already missing for node %v.", taintName, nodeID)
		return
	}

	// patchTaints is a single RFC 6902 JSON-patch operation.
	type patchTaints struct {
		Op    string      `json:"op"`
		Path  string      `json:"path"`
		Value interface{} `json:"value,omitempty"`
	}

	taint := v1.Taint{
		Key:    taintName,
		Effect: effect,
	}

	var patches []patchTaints

	if len(updatedNode.Spec.Taints) == 0 {
		// add first taint and ensure to keep current taints:
		// the "test" op makes the patch fail if /spec changed since we read
		// it, and /spec/taints must be created before appending to it.
		patches = []patchTaints{
			{
				Op:    "test",
				Path:  "/spec",
				Value: updatedNode.Spec,
			},
			{
				Op:    "add",
				Path:  "/spec/taints",
				Value: []v1.Taint{},
			},
			{
				Op:    "add",
				Path:  "/spec/taints/-",
				Value: taint,
			},
		}
	} else if taintExists {
		// remove taint and ensure to test against race conditions:
		// the "test" op guards against the taint list shifting underneath us.
		patches = []patchTaints{
			{
				Op:    "test",
				Path:  fmt.Sprintf("/spec/taints/%d", offset),
				Value: taint,
			},
			{
				Op:    "remove",
				Path:  fmt.Sprintf("/spec/taints/%d", offset),
			},
		}
	} else {
		// add missing taint to existing list
		patches = []patchTaints{
			{
				Op:    "add",
				Path:  "/spec/taints/-",
				Value: taint,
			},
		}
	}

	patchBytes, err := json.Marshal(patches)
	if err != nil {
		log.Fatalf("Error encoding taint patch for node %s: %v", nodeID, err)
	}

	_, err = client.CoreV1().Nodes().Patch(context.TODO(), nodeID, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
	if err != nil {
		log.Fatalf("Error patching taint for node %s: %v", nodeID, err)
	}

	if shouldExists {
		log.Info("Node taint added")
	} else {
		log.Info("Node taint removed")
	}
}
|
||||
@@ -1,91 +0,0 @@
|
||||
package timewindow
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// EveryDay contains all days of the week, and exports it
// for convenience use in the cmd line arguments.
var EveryDay = []string{"su", "mo", "tu", "we", "th", "fr", "sa"}

// dayStrings maps day strings to time.Weekdays
// (two-letter, three-letter, and full lowercase names are all accepted).
var dayStrings = map[string]time.Weekday{
	"su":        time.Sunday,
	"sun":       time.Sunday,
	"sunday":    time.Sunday,
	"mo":        time.Monday,
	"mon":       time.Monday,
	"monday":    time.Monday,
	"tu":        time.Tuesday,
	"tue":       time.Tuesday,
	"tuesday":   time.Tuesday,
	"we":        time.Wednesday,
	"wed":       time.Wednesday,
	"wednesday": time.Wednesday,
	"th":        time.Thursday,
	"thu":       time.Thursday,
	"thursday":  time.Thursday,
	"fr":        time.Friday,
	"fri":       time.Friday,
	"friday":    time.Friday,
	"sa":        time.Saturday,
	"sat":       time.Saturday,
	"saturday":  time.Saturday,
}

// weekdays is a bit set of days of the week: bit n is set when
// time.Weekday(n) is a member (Sunday == bit 0).
type weekdays uint32
||||
|
||||
// parseWeekdays creates a set of weekdays from a string slice
|
||||
func parseWeekdays(days []string) (weekdays, error) {
|
||||
var result uint32
|
||||
for _, day := range days {
|
||||
if len(day) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
weekday, err := parseWeekday(day)
|
||||
if err != nil {
|
||||
return weekdays(0), err
|
||||
}
|
||||
|
||||
result |= 1 << uint32(weekday)
|
||||
}
|
||||
|
||||
return weekdays(result), nil
|
||||
}
|
||||
|
||||
// Contains returns true if the specified weekday is a member of this set.
|
||||
func (w weekdays) Contains(day time.Weekday) bool {
|
||||
return uint32(w)&(1<<uint32(day)) != 0
|
||||
}
|
||||
|
||||
// String returns a string representation of the set of weekdays.
|
||||
func (w weekdays) String() string {
|
||||
var b strings.Builder
|
||||
for i := uint32(0); i < 7; i++ {
|
||||
if uint32(w)&(1<<i) != 0 {
|
||||
b.WriteString(time.Weekday(i).String()[0:3])
|
||||
} else {
|
||||
b.WriteString("---")
|
||||
}
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func parseWeekday(day string) (time.Weekday, error) {
|
||||
if n, err := strconv.Atoi(day); err == nil {
|
||||
if n >= 0 && n < 7 {
|
||||
return time.Weekday(n), nil
|
||||
}
|
||||
return time.Sunday, fmt.Errorf("Invalid weekday, number out of range: %s", day)
|
||||
}
|
||||
|
||||
if weekday, ok := dayStrings[strings.ToLower(day)]; ok {
|
||||
return weekday, nil
|
||||
}
|
||||
return time.Sunday, fmt.Errorf("Invalid weekday: %s", day)
|
||||
}
|
||||
@@ -1,46 +0,0 @@
|
||||
package timewindow
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseWeekdays(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
result string
|
||||
}{
|
||||
{"0,4", "Sun---------Thu------"},
|
||||
{"su,mo,tu", "SunMonTue------------"},
|
||||
{"sunday,tu,thu", "Sun---Tue---Thu------"},
|
||||
{"THURSDAY", "------------Thu------"},
|
||||
{"we,WED,WeDnEsDaY", "---------Wed---------"},
|
||||
{"", "---------------------"},
|
||||
{",,,", "---------------------"},
|
||||
}
|
||||
|
||||
for _, tst := range tests {
|
||||
res, err := parseWeekdays(strings.Split(tst.input, ","))
|
||||
if err != nil {
|
||||
t.Errorf("Received error for input %s: %v", tst.input, err)
|
||||
} else if res.String() != tst.result {
|
||||
t.Errorf("Test %s: Expected %s got %s", tst.input, tst.result, res.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseWeekdaysErrors(t *testing.T) {
|
||||
tests := []string{
|
||||
"15",
|
||||
"-8",
|
||||
"8",
|
||||
"mon,tue,wed,fridayyyy",
|
||||
}
|
||||
|
||||
for _, tst := range tests {
|
||||
_, err := parseWeekdays(strings.Split(tst, ","))
|
||||
if err == nil {
|
||||
t.Errorf("Expected to receive error for input %s", tst)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,81 +0,0 @@
|
||||
package timewindow
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TimeWindow specifies a schedule of days and times.
type TimeWindow struct {
	days      weekdays       // set of permitted weekdays
	location  *time.Location // timezone in which the window is evaluated
	startTime time.Time      // daily window start (only clock time is used)
	endTime   time.Time      // daily window end (only clock time is used)
}
|
||||
|
||||
// New creates a TimeWindow instance based on string inputs specifying a schedule.
|
||||
func New(days []string, startTime, endTime, location string) (*TimeWindow, error) {
|
||||
tw := &TimeWindow{}
|
||||
|
||||
var err error
|
||||
if tw.days, err = parseWeekdays(days); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if tw.location, err = time.LoadLocation(location); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if tw.startTime, err = parseTime(startTime, tw.location); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if tw.endTime, err = parseTime(endTime, tw.location); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return tw, nil
|
||||
}
|
||||
|
||||
// Contains determines whether the specified time is within this time window.
func (tw *TimeWindow) Contains(t time.Time) bool {
	// Evaluate everything in the window's configured timezone.
	loctime := t.In(tw.location)
	if !tw.days.Contains(loctime.Weekday()) {
		return false
	}

	// Project the configured start/end clock times onto loctime's date.
	// end carries 1e9-1 nanoseconds so the final second is fully inclusive.
	start := time.Date(loctime.Year(), loctime.Month(), loctime.Day(), tw.startTime.Hour(), tw.startTime.Minute(), tw.startTime.Second(), 0, tw.location)
	end := time.Date(loctime.Year(), loctime.Month(), loctime.Day(), tw.endTime.Hour(), tw.endTime.Minute(), tw.endTime.Second(), 1e9-1, tw.location)

	// Time Wrap validation
	// First we check for start and end time, if start is after end time
	// the window crosses midnight. Then we decide which way to wrap:
	// if loctime is before today's end, the window started yesterday, so
	// shift start back a day; otherwise the window ends tomorrow, so shift
	// end forward a day.
	if tw.startTime.After(tw.endTime) {
		if loctime.Before(end) {
			start = start.Add(-24 * time.Hour)
		} else {
			end = end.Add(24 * time.Hour)
		}
	}

	// Inclusive on both boundaries.
	return (loctime.After(start) || loctime.Equal(start)) && (loctime.Before(end) || loctime.Equal(end))
}
|
||||
|
||||
// String returns a string representation of this time window.
|
||||
func (tw *TimeWindow) String() string {
|
||||
return fmt.Sprintf("%s between %02d:%02d and %02d:%02d %s", tw.days.String(), tw.startTime.Hour(), tw.startTime.Minute(), tw.endTime.Hour(), tw.endTime.Minute(), tw.location.String())
|
||||
}
|
||||
|
||||
// parseTime tries to parse a time with several formats.
|
||||
func parseTime(s string, loc *time.Location) (time.Time, error) {
|
||||
fmts := []string{"15:04", "15:04:05", "03:04pm", "15", "03pm", "3pm"}
|
||||
for _, f := range fmts {
|
||||
if t, err := time.ParseInLocation(f, s, loc); err == nil {
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
|
||||
return time.Now(), fmt.Errorf("Invalid time format: %s", s)
|
||||
}
|
||||
@@ -1,97 +0,0 @@
|
||||
package timewindow
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestTimeWindows builds TimeWindows from string specs and checks
// membership of concrete timestamps, including windows that wrap across
// midnight and day-boundary edge cases.
func TestTimeWindows(t *testing.T) {
	type testcase struct {
		time   string // timestamp in "2006/01/02 15:04 MST" form
		result bool   // expected Contains() outcome
	}

	tests := []struct {
		days  string
		start string
		end   string
		loc   string
		cases []testcase
	}{
		{"mon,tue,wed,thu,fri", "9am", "5pm", "America/Los_Angeles", []testcase{
			{"2019/03/31 10:00 PDT", false},
			{"2019/04/04 00:49 PDT", false},
			{"2019/04/04 12:00 PDT", true},
			{"2019/04/04 11:59 UTC", false},
			{"2019/04/05 08:59 PDT", false},
			{"2019/04/05 9:01 PDT", true},
		}},
		{"mon,we,fri", "10:01", "11:30am", "America/Los_Angeles", []testcase{
			{"2019/04/05 10:30 PDT", true},
			{"2019/04/06 10:30 PDT", false},
			{"2019/04/07 10:30 PDT", false},
			{"2019/04/08 10:30 PDT", true},
			{"2019/04/09 10:30 PDT", false},
			{"2019/04/10 10:30 PDT", true},
			{"2019/04/11 10:30 PDT", false},
		}},
		{"mo,tu,we,th,fr", "00:00", "23:59:59", "UTC", []testcase{
			{"2019/04/18 00:00 UTC", true},
			{"2019/04/18 23:59 UTC", true},
		}},
		// Overnight window (start after end): wraps across midnight.
		{"mon,tue,wed,thu,fri", "9pm", "5am", "America/Los_Angeles", []testcase{
			{"2019/03/30 04:00 PDT", false},
			{"2019/03/31 10:00 PDT", false},
			{"2019/03/31 22:00 PDT", false},
			{"2019/04/04 00:49 PDT", true},
			{"2019/04/04 12:00 PDT", false},
			{"2019/04/04 22:49 PDT", true},
			{"2019/04/05 00:49 PDT", true},
			{"2019/04/05 08:59 PDT", false},
			{"2019/04/05 9:01 PDT", false},
		}},
		{"mon,tue,wed,thu,fri", "11:59pm", "00:01am", "America/Los_Angeles", []testcase{
			{"2019/04/04 23:58 PDT", false},
			{"2019/04/04 23:59 PDT", true},
			{"2019/04/05 00:00 PDT", true},
			{"2019/04/05 00:01 PDT", true},
			{"2019/04/05 00:02 PDT", false},
		}},
		{"mon,tue,wed,fri", "11:59pm", "00:01am", "America/Los_Angeles", []testcase{
			{"2019/04/04 23:58 PDT", false},
			{"2019/04/04 23:59 PDT", false}, // Even that this falls in the between the hours Thursday is not included so should not run
			{"2019/04/05 00:00 PDT", true},
			{"2019/04/05 00:02 PDT", false},
		}},
		{"mon,tue,wed,thu", "11:59pm", "00:01am", "America/Los_Angeles", []testcase{
			{"2019/04/04 23:58 PDT", false},
			{"2019/04/04 23:59 PDT", true},
			{"2019/04/05 00:00 PDT", false}, // Even that this falls in the between the hours Friday is not included so should not run
			{"2019/04/05 00:02 PDT", false},
		}},
		{"mon,tue,wed,thu,fri", "11:59pm", "00:01am", "UTC", []testcase{
			{"2019/04/04 23:58 UTC", false},
			{"2019/04/04 23:59 UTC", true},
			{"2019/04/05 00:00 UTC", true},
			{"2019/04/05 00:01 UTC", true},
			{"2019/04/05 00:02 UTC", false},
		}},
	}

	for i, tst := range tests {
		tw, err := New(strings.Split(tst.days, ","), tst.start, tst.end, tst.loc)
		if err != nil {
			t.Errorf("Test [%d] failed to create TimeWindow: %v", i, err)
		}

		for _, cas := range tst.cases {
			// Parse each probe timestamp in the window's own location.
			tm, err := time.ParseInLocation("2006/01/02 15:04 MST", cas.time, tw.location)
			if err != nil {
				t.Errorf("Failed to parse time \"%s\": %v", cas.time, err)
			} else if cas.result != tw.Contains(tm) {
				t.Errorf("(%s) contains (%s) didn't match expected result of %v", tw.String(), cas.time, cas.result)
			}
		}
	}
}
|
||||
@@ -1,12 +0,0 @@
|
||||
#!/usr/bin/env bash

# Create the reboot sentinel file on every node of a docker-backed (kind)
# cluster so kured considers each node to require a reboot.
#
# Environment:
#   KUBECTL_CMD   - kubectl command, may include extra arguments such as
#                   "--context foo" (default: kubectl)
#   SENTINEL_FILE - path of the sentinel file created on each node
#                   (default: /var/run/reboot-required)

# USE KUBECTL_CMD to pass context and/or namespaces.
KUBECTL_CMD="${KUBECTL_CMD:-kubectl}"
SENTINEL_FILE="${SENTINEL_FILE:-/var/run/reboot-required}"

# Split KUBECTL_CMD into words so extra arguments (context, namespace, ...)
# are passed through; quoting it as a single word would make a value like
# "kubectl --context foo" fail to execute.
read -r -a kubectl_cmd <<< "$KUBECTL_CMD"

echo "Creating reboot sentinel on all nodes"

for nodename in $("${kubectl_cmd[@]}" get nodes -o name); do
  # Node resources are named "node/<name>"; the suffix is the docker
  # container name used by kind.
  container="${nodename/node\//}"
  docker exec "$container" hostname
  docker exec "$container" touch "${SENTINEL_FILE}"
done
|
||||
@@ -1,85 +0,0 @@
|
||||
#!/usr/bin/env bash

# Watch a cluster while kured performs coordinated reboots: the test passes
# once every node has been cordoned (unschedulable) at least once and has
# subsequently come back as schedulable.
#
# Environment:
#   NODECOUNT             - number of nodes expected in the cluster (default: 5)
#   KUBECTL_CMD           - kubectl command (default: kubectl)
#                           NOTE(review): used quoted as a single word below,
#                           so values with extra args won't split — confirm.
#   DEBUG                 - "true" to dump docker/journal logs (default: false)
#   CONTAINER_NAME_FORMAT - docker name filter for node containers
#                           (default: "chart-testing-*")

NODECOUNT=${NODECOUNT:-5}
KUBECTL_CMD="${KUBECTL_CMD:-kubectl}"
DEBUG="${DEBUG:-false}"
CONTAINER_NAME_FORMAT=${CONTAINER_NAME_FORMAT:-"chart-testing-*"}

tmp_dir=$(mktemp -d -t kured-XXXX)
# Remove the temp workspace and, in DEBUG mode, dump docker state and
# per-node container logs. Runs on every exit path via the EXIT trap.
function gather_logs_and_cleanup {
  if [[ -f "$tmp_dir"/node_output ]]; then
    rm "$tmp_dir"/node_output
  fi
  rmdir "$tmp_dir"

  # The next commands are useful regardless of success or failures.
  if [[ "$DEBUG" == "true" ]]; then
    echo "############################################################"
    # This is useful to see if containers have crashed.
    echo "docker ps -a:"
    docker ps -a
    echo "docker journal logs"
    journalctl -u docker --no-pager

    # This is useful to see if the nodes have _properly_ rebooted.
    # It should show the reboot/two container starts per node.
    for name in $(docker ps -a -f "name=${CONTAINER_NAME_FORMAT}" -q); do
      echo "############################################################"
      echo "docker logs for container $name:"
      docker logs "$name"
    done

  fi
}
trap gather_logs_and_cleanup EXIT

# Track, per node name, whether it has been seen unschedulable and whether
# it became schedulable again afterwards.
declare -A was_unschedulable
declare -A has_recovered
max_attempts="60"
sleep_time=60
attempt_num=1

# Polling must survive transient kubectl failures, so disable errexit.
set +o errexit
echo "There are $NODECOUNT nodes in the cluster"
until [ ${#was_unschedulable[@]} == "$NODECOUNT" ] && [ ${#has_recovered[@]} == "$NODECOUNT" ]
do
  echo "${#was_unschedulable[@]} nodes were removed from pool once:" "${!was_unschedulable[@]}"
  echo "${#has_recovered[@]} nodes removed from the pool are now back:" "${!has_recovered[@]}"

  # One line per node: "<name> <true|<none>>" where true means cordoned.
  "$KUBECTL_CMD" get nodes -o custom-columns=NAME:.metadata.name,SCHEDULABLE:.spec.unschedulable --no-headers > "$tmp_dir"/node_output
  if [[ "$DEBUG" == "true" ]]; then
    # This is useful to see if a node gets stuck after drain, and doesn't
    # come back up.
    echo "Result of command $KUBECTL_CMD get nodes ... showing unschedulable nodes:"
    cat "$tmp_dir"/node_output
  fi
  while read -r node; do
    # A node is counted as "was unschedulable" the first time it shows true.
    unschedulable=$(echo "$node" | grep true | cut -f 1 -d ' ')
    if [ -n "$unschedulable" ] && [ -z ${was_unschedulable["$unschedulable"]+x} ] ; then
      echo "$unschedulable is now unschedulable!"
      was_unschedulable["$unschedulable"]=1
    fi
    # A node "has recovered" when it reads <none> after having been cordoned.
    schedulable=$(echo "$node" | grep '<none>' | cut -f 1 -d ' ')
    if [ -n "$schedulable" ] && [ ${was_unschedulable["$schedulable"]+x} ] && [ -z ${has_recovered["$schedulable"]+x} ]; then
      echo "$schedulable has recovered!"
      has_recovered["$schedulable"]=1
    fi
  done < "$tmp_dir"/node_output

  if [[ "${#has_recovered[@]}" == "$NODECOUNT" ]]; then
    echo "All nodes recovered."
    break
  else
    if (( attempt_num == max_attempts ))
    then
      echo "Attempt $attempt_num failed and there are no more attempts left!"
      exit 1
    else
      echo "Attempt $attempt_num failed! Trying again in $sleep_time seconds..."
      sleep "$sleep_time"
    fi
  fi
  (( attempt_num++ ))
done

echo "Test successful"
@@ -1,19 +0,0 @@
|
||||
#!/usr/bin/env bash

# Verify that kured's kured_reboot_required Prometheus gauge has the
# expected value, scraped from inside a node container.
#
# Usage: <script> <0|1>
#
# Environment:
#   HOST     - host serving the metrics endpoint (default: localhost)
#   PORT     - port of the metrics endpoint (default: 30000)
#   NODENAME - docker container from which curl is run
#              (default: chart-testing-control-plane)

expected="$1"
if [[ "$expected" != "0" && "$expected" != "1" ]]; then
  echo "You should give an argument to this script, the gauge value (0 or 1)"
  exit 1
fi

HOST="${HOST:-localhost}"
PORT="${PORT:-30000}"
# Use :- (not -) so an empty NODENAME also falls back to the default,
# consistent with HOST and PORT above.
NODENAME="${NODENAME:-chart-testing-control-plane}"

# --silent keeps curl's progress meter out of the test output; --fail makes
# HTTP errors yield an empty value instead of an HTML error page, so the
# comparison below fails cleanly.
reboot_required=$(docker exec "$NODENAME" curl --silent --fail "http://$HOST:$PORT/metrics" | awk '/^kured_reboot_required/{print $2}')
if [[ "$reboot_required" == "$expected" ]]; then
  echo "Test success"
else
  echo "Test failed"
  exit 1
fi
|
||||
Reference in New Issue
Block a user