Mirror of https://github.com/rancher/k3k.git (synced 2026-02-19 20:40:04 +00:00)

Compare commits: chart-0.3. ... v0.3.4-rc3 (22 commits)
| Author | SHA1 | Date |
|---|---|---|
| | d23cf86fce | |
| | 65cb8ad123 | |
| | 6db88b5a00 | |
| | 8d89c7d133 | |
| | 883d401ae3 | |
| | f85702dc23 | |
| | 084701fcd9 | |
| | 5eb1d2a5bb | |
| | 98d17cdb50 | |
| | 2047a600ed | |
| | a98c49b59a | |
| | 1048e3f82d | |
| | c480bc339e | |
| | a0af20f20f | |
| | 748a439d7a | |
| | 0a55bec305 | |
| | 2ab71df139 | |
| | 753b31b52a | |
| | fcc875ab85 | |
| | 57263bd10e | |
| | bf82318ad9 | |
| | 1ca86d09d1 | |
2 .cr.yaml

@@ -1 +1,3 @@
release-name-template: chart-{{ .Version }}
make-release-latest: false
skip-existing: true
12 .github/workflows/chart.yml vendored

@@ -2,9 +2,6 @@ name: Chart

on:
  workflow_dispatch:
  push:
    tags:
      - "chart-*"

permissions:
  contents: write
@@ -18,15 +15,6 @@ jobs:
        with:
          fetch-depth: 0

      - name: Check tag
        if: github.event_name == 'push'
        run: |
          pushed_tag=$(echo ${{ github.ref_name }} | sed "s/chart-//")
          chart_tag=$(yq .version charts/k3k/Chart.yaml)

          echo pushed_tag=${pushed_tag} chart_tag=${chart_tag}
          [ "${pushed_tag}" == "${chart_tag}" ]

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
10 .github/workflows/test-conformance.yaml vendored

@@ -113,6 +113,7 @@ jobs:
            namespace: k3k-mycluster
          spec:
            servers: 2
            mirrorHostNodes: true
            tlsSANs:
              - "127.0.0.1"
            expose:
@@ -156,7 +157,6 @@ jobs:

  sigs:
    runs-on: ubuntu-latest
    if: inputs.test == '' || inputs.test != 'conformance'

    strategy:
      fail-fast: false
@@ -184,6 +184,12 @@ jobs:
          focus: '\[sig-storage\].*\[Conformance\]'

    steps:
      - name: Validate input and fail fast
        if: inputs.test != '' && inputs.test != matrix.tests.name
        run: |
          echo "Failing this job as it's not the intended target."
          exit 1

      - name: Checkout code
        uses: actions/checkout@v4
        with:
@@ -260,6 +266,7 @@ jobs:
            namespace: k3k-mycluster
          spec:
            servers: 2
            mirrorHostNodes: true
            tlsSANs:
              - "127.0.0.1"
            expose:
@@ -279,7 +286,6 @@ jobs:
          kubectl get pods -A

      - name: Run sigs tests
        if: inputs.test == '' || inputs.test == matrix.tests.name
        run: |
          FOCUS="${{ matrix.tests.focus }}"
          echo "Running with --focus=${FOCUS}"
113 .github/workflows/test.yaml vendored

@@ -11,7 +11,7 @@ permissions:
jobs:
  lint:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
@@ -21,12 +21,12 @@ jobs:
          go-version-file: go.mod

      - name: golangci-lint
        uses: golangci/golangci-lint-action@v6
        uses: golangci/golangci-lint-action@v8
        with:
          args: --timeout=5m
          version: v1.64
          version: v2.3.0

  tests:
  validate:
    runs-on: ubuntu-latest

    steps:
@@ -36,15 +36,35 @@ jobs:
      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Validate
        run: make validate

  tests:
    runs-on: ubuntu-latest
    needs: validate

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Run unit tests
        run: make test-unit

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./cover.out
          flags: unit

  tests-e2e:
    runs-on: ubuntu-latest
    needs: validate

    steps:
      - name: Checkout code
@@ -56,13 +76,10 @@ jobs:
      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Validate
        run: make validate

      - name: Install Ginkgo
        run: go install github.com/onsi/ginkgo/v2/ginkgo

      - name: Build and package
        run: |
          make build
@@ -77,16 +94,86 @@ jobs:
      - name: Run e2e tests
        run: make test-e2e

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./cover.out
          flags: e2e

      - name: Archive k3s logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: k3s-logs
          name: e2e-k3s-logs
          path: /tmp/k3s.log

      - name: Archive k3k logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: k3k-logs
          name: e2e-k3k-logs
          path: /tmp/k3k.log

  tests-cli:
    runs-on: ubuntu-latest
    needs: validate

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Install Ginkgo
        run: go install github.com/onsi/ginkgo/v2/ginkgo

      - name: Set coverage environment
        run: |
          mkdir ${{ github.workspace }}/covdata

          echo "COVERAGE=true" >> $GITHUB_ENV
          echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV

      - name: Build and package
        run: |
          make build
          make package

          # add k3kcli to $PATH
          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH

      - name: Check k3kcli
        run: k3kcli -v

      - name: Run cli tests
        run: make test-cli

      - name: Convert coverage data
        run: go tool covdata textfmt -i=${{ github.workspace }}/covdata -o ${{ github.workspace }}/covdata/cover.out

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ${{ github.workspace }}/covdata/cover.out
          flags: cli

      - name: Archive k3s logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: cli-k3s-logs
          path: /tmp/k3s.log

      - name: Archive k3k logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: cli-k3k-logs
          path: /tmp/k3k.log
3 .gitignore vendored

@@ -8,3 +8,6 @@
__debug*
*-kubeconfig.yaml
.envtest
cover.out
covcounters.**
covmeta.**
.golangci.yml

@@ -1,13 +1,27 @@
version: "2"

linters:
  enable:
    # default linters
    - errcheck
    - gosimple
    - govet
    - ineffassign
    - staticcheck
    - unused
    - misspell
    - wsl_v5

    # extra
    - misspell
    - wsl
formatters:
  enable:
    - gci
    - gofmt
    - gofumpt
  settings:
    gci:
      # The default order is `standard > default > custom > blank > dot > alias > localmodule`.
      custom-order: true
      sections:
        - standard
        - default
        - alias
        - localmodule
        - dot
        - blank
    gofmt:
      rewrite-rules:
        - pattern: 'interface{}'
          replacement: 'any'
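For reference, this is what the custom gci section order above accepts: standard library first, then third-party modules, then explicitly aliased imports, then packages from this module, with dot and blank imports last. A minimal sketch (not a file from the repo; the helper below exists only to keep the imports used):

```go
package cmds

import (
	// standard
	"context"
	"fmt"

	// default: third-party modules
	"github.com/spf13/cobra"

	// alias: imports bound to an explicit name
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// localmodule: packages from this module
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"

	// blank: side-effect imports
	_ "k8s.io/client-go/plugin/pkg/client/auth"
)

// exampleUsage is a throwaway function so the example compiles.
func exampleUsage(ctx context.Context) {
	cmd := &cobra.Command{Use: "example"}
	cluster := v1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
	fmt.Println(cmd.Use, cluster.Name, ctx != nil)
}
```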
30 Makefile

@@ -1,16 +1,18 @@
REPO ?= rancher
COVERAGE ?= false
VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")

## Dependencies

GOLANGCI_LINT_VERSION := v1.64.8
GOLANGCI_LINT_VERSION := v2.3.0
GINKGO_VERSION ?= v2.21.0
GINKGO_FLAGS ?= -v -r --coverprofile=cover.out --coverpkg=./...
ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5
ENVTEST_K8S_VERSION := 1.31.0
CRD_REF_DOCS_VER ?= v0.1.0

GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION)
CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)

@@ -28,7 +30,7 @@ version: ## Print the current version

.PHONY: build
build: ## Build the K3k binaries (k3k, k3k-kubelet and k3kcli)
    @VERSION=$(VERSION) ./scripts/build
    @VERSION=$(VERSION) COVERAGE=$(COVERAGE) ./scripts/build

.PHONY: package
package: package-k3k package-k3k-kubelet ## Package the k3k and k3k-kubelet Docker images

@@ -51,19 +53,27 @@ push-%:

.PHONY: test
test: ## Run all the tests
    $(GINKGO) -v -r --label-filter=$(label-filter)
    $(GINKGO) $(GINKGO_FLAGS) --label-filter=$(label-filter)

.PHONY: test-unit
test-unit: ## Run the unit tests (skips the e2e)
    $(GINKGO) -v -r --skip-file=tests/*
    $(GINKGO) $(GINKGO_FLAGS) --skip-file=tests/*

.PHONY: test-controller
test-controller: ## Run the controller tests (pkg/controller)
    $(GINKGO) -v -r pkg/controller
    $(GINKGO) $(GINKGO_FLAGS) pkg/controller

.PHONY: test-kubelet-controller
test-kubelet-controller: ## Run the kubelet controller tests (k3k-kubelet/controller)
    $(GINKGO) $(GINKGO_FLAGS) k3k-kubelet/controller

.PHONY: test-e2e
test-e2e: ## Run the e2e tests
    $(GINKGO) -v -r tests
    $(GINKGO) $(GINKGO_FLAGS) --label-filter=e2e tests

.PHONY: test-cli
test-cli: ## Run the cli tests
    $(GINKGO) $(GINKGO_FLAGS) --label-filter=cli --flake-attempts=3 tests

.PHONY: generate
generate: ## Generate the CRDs specs

@@ -81,8 +91,12 @@ docs: ## Build the CRDs and CLI docs
lint: ## Find any linting issues in the project
    $(GOLANGCI_LINT) run --timeout=5m

.PHONY: fmt
fmt: ## Fix any formatting issues in the project
    $(GOLANGCI_LINT) fmt ./...

.PHONY: validate
validate: generate docs ## Validate the project checking for any dependency or doc mismatch
validate: generate docs fmt ## Validate the project checking for any dependency or doc mismatch
    $(GINKGO) unfocus
    go mod tidy
    git status --porcelain
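The new test targets select specs by Ginkgo v2 labels (`--label-filter=e2e`, `--label-filter=cli`) rather than by directory alone. A minimal sketch of how a suite opts into those filters; this is illustrative, not one of the repo's actual specs:

```go
package tests_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestSuite(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "K3k Suite")
}

// Selected by `make test-e2e` via --label-filter=e2e.
var _ = Describe("cluster provisioning", Label("e2e"), func() {
	It("eventually reports Ready", func() {
		Expect(true).To(BeTrue()) // placeholder assertion
	})
})

// Selected by `make test-cli` via --label-filter=cli (retried with --flake-attempts=3).
var _ = Describe("k3kcli", Label("cli"), func() {
	It("prints its version", func() {
		Expect("k3kcli Version: v0.3.4").To(ContainSubstring("Version"))
	})
})
```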
@@ -71,7 +71,7 @@ To install it, simply download the latest available version for your architectur
For example, you can download the Linux amd64 version with:

```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.2/k3kcli-linux-amd64 && \
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.3/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```
@@ -79,7 +79,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.2/k3kcli-l
You should now be able to run:
```bash
-> % k3kcli --version
k3kcli Version: v0.3.2
k3kcli Version: v0.3.3
```
charts/k3k/Chart.yaml

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.3.3
appVersion: v0.3.3
version: 0.3.4-rc2
appVersion: v0.3.4-rc2
@@ -18,6 +18,9 @@ spec:
- jsonPath: .spec.mode
name: Mode
type: string
- jsonPath: .status.phase
name: Status
type: string
- jsonPath: .status.policyName
name: Policy
type: string
@@ -220,6 +223,89 @@ spec:
x-kubernetes-validations:
- message: clusterDNS is immutable
rule: self == oldSelf
customCAs:
description: CustomCAs specifies the cert/key pairs for custom CA
certificates.
properties:
enabled:
description: Enabled toggles this feature on or off.
type: boolean
sources:
description: Sources defines the sources for all required custom
CA certificates.
properties:
clientCA:
description: ClientCA specifies the client-ca cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
etcdPeerCA:
description: ETCDPeerCA specifies the etcd-peer-ca cert/key
pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
etcdServerCA:
description: ETCDServerCA specifies the etcd-server-ca cert/key
pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
requestHeaderCA:
description: RequestHeaderCA specifies the request-header-ca
cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
serverCA:
description: ServerCA specifies the server-ca cert/key pair.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
serviceAccountToken:
description: ServiceAccountToken specifies the service-account-token
key.
properties:
secretName:
description: |-
SecretName specifies the name of an existing secret to use.
The controller expects specific keys inside based on the credential type:
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
- For ServiceAccountTokenKey: 'tls.key'.
type: string
type: object
type: object
type: object
expose:
description: |-
Expose specifies options for exposing the API server.
@@ -279,6 +365,11 @@ spec:
type: integer
type: object
type: object
mirrorHostNodes:
description: |-
MirrorHostNodes controls whether node objects from the host cluster
are mirrored into the virtual cluster.
type: boolean
mode:
allOf:
- enum:
@@ -303,8 +394,6 @@ spec:
In "shared" mode, this also applies to workloads.
type: object
persistence:
default:
type: dynamic
description: |-
Persistence specifies options for persisting etcd data.
Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
@@ -316,6 +405,7 @@ spec:
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
default: 1G
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
@@ -324,8 +414,6 @@ spec:
default: dynamic
description: Type specifies the persistence mode.
type: string
required:
- type
type: object
priorityClass:
description: |-
@@ -524,6 +612,7 @@ spec:
type: object
type: object
status:
default: {}
description: Status reflects the observed state of the Cluster.
properties:
clusterCIDR:
@@ -532,29 +621,83 @@ spec:
clusterDNS:
description: ClusterDNS is the IP address for the CoreDNS service.
type: string
conditions:
description: Conditions are the individual conditions for the cluster
set.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
hostVersion:
description: HostVersion is the Kubernetes version of the host node.
type: string
persistence:
description: Persistence specifies options for persisting etcd data.
properties:
storageClassName:
description: |-
StorageClassName is the name of the StorageClass to use for the PVC.
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.
type: string
type:
default: dynamic
description: Type specifies the persistence mode.
type: string
required:
- type
type: object
kubeletPort:
description: KubeletPort specifies the port used by k3k-kubelet in
shared mode.
type: integer
phase:
default: Unknown
description: Phase is a high-level summary of the cluster's current
lifecycle state.
enum:
- Pending
- Provisioning
- Ready
- Failed
- Terminating
- Unknown
type: string
policyName:
description: PolicyName specifies the virtual cluster policy name
bound to the virtual cluster.
@@ -568,6 +711,10 @@ spec:
items:
type: string
type: array
webhookPort:
description: WebhookPort specifies the port used by webhook in k3k-kubelet
in shared mode.
type: integer
type: object
type: object
served: true
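The new `status.conditions` array follows the standard `metav1.Condition` schema. A sketch of how a controller can maintain such conditions with the apimachinery helper; whether k3k's controller uses this exact helper is an assumption, the helper itself is standard:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var conditions []metav1.Condition

	// SetStatusCondition upserts by Type and only bumps
	// LastTransitionTime when the Status value actually changes.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:               "Ready",
		Status:             metav1.ConditionTrue,
		Reason:             "Provisioned",      // must be CamelCase per the CRD pattern
		Message:            "all servers joined",
		ObservedGeneration: 1,
	})

	fmt.Println(conditions[0].Type, conditions[0].Status)
}
```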
@@ -30,6 +30,17 @@ spec:
          value: {{ .Values.k3sServer.image.repository }}
        - name: K3S_IMAGE_PULL_POLICY
          value: {{ .Values.k3sServer.image.pullPolicy }}
        - name: KUBELET_PORT_RANGE
          value: {{ .Values.sharedAgent.kubeletPortRange }}
        - name: WEBHOOK_PORT_RANGE
          value: {{ .Values.sharedAgent.webhookPortRange }}
        - name: CONTROLLER_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        {{- with .Values.extraEnv }}
        {{- toYaml . | nindent 10 }}
        {{- end }}
        ports:
        - containerPort: 8080
          name: https
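The template injects `KUBELET_PORT_RANGE`, `WEBHOOK_PORT_RANGE`, and, via the downward API, `CONTROLLER_NAMESPACE` into the controller container. A sketch of the consuming side; the variable names come from the template above, but the lookup code is illustrative, not the controller's actual implementation:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	kubeletRange := os.Getenv("KUBELET_PORT_RANGE") // e.g. "50000-51000"
	webhookRange := os.Getenv("WEBHOOK_PORT_RANGE") // e.g. "51001-52000"
	namespace := os.Getenv("CONTROLLER_NAMESPACE")  // injected via fieldRef metadata.namespace

	fmt.Printf("kubelet=%s webhook=%s ns=%s\n", kubeletRange, webhookRange, namespace)
}
```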
@@ -16,7 +16,7 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "k3k.fullname" . }}-node-proxy
  name: k3k-kubelet-node
rules:
- apiGroups:
  - ""
@@ -30,8 +30,29 @@ rules:
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "k3k.fullname" . }}-node-proxy
  name: k3k-kubelet-node
roleRef:
  kind: ClusterRole
  name: {{ include "k3k.fullname" . }}-node-proxy
  name: k3k-kubelet-node
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: k3k-priorityclass
rules:
- apiGroups:
  - "scheduling.k8s.io"
  resources:
  - "priorityclasses"
  verbs:
  - "*"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: k3k-priorityclass
roleRef:
  kind: ClusterRole
  name: k3k-priorityclass
  apiGroup: rbac.authorization.k8s.io
charts/k3k/values.yaml

@@ -9,6 +9,19 @@ imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

# extraEnv allows you to specify additional environment variables for the k3k controller deployment.
# This is useful for passing custom configuration or secrets to the controller.
# For example:
# extraEnv:
#   - name: MY_CUSTOM_VAR
#     value: "my_custom_value"
#   - name: ANOTHER_VAR
#     valueFrom:
#       secretKeyRef:
#         name: my-secret
#         key: my-key
extraEnv: []

host:
  # clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
  # the controller will collect the PodCIDRs of all the nodes on the system.
@@ -23,6 +36,10 @@ serviceAccount:

# configuration related to the shared agent mode in k3k
sharedAgent:
  # Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
  kubeletPortRange: "50000-51000"
  # Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
  webhookPortRange: "51001-52000"
  image:
    repository: "rancher/k3k-kubelet"
    tag: ""
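Both new values are "lo-hi" strings. A sketch of how such a range could be split and validated; `parsePortRange` is a hypothetical helper, not a k3k API:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePortRange splits a "lo-hi" range such as "50000-51000"
// into its bounds and sanity-checks them.
func parsePortRange(s string) (lo, hi int, err error) {
	parts := strings.SplitN(s, "-", 2)
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("invalid range %q", s)
	}

	if lo, err = strconv.Atoi(parts[0]); err != nil {
		return 0, 0, err
	}

	if hi, err = strconv.Atoi(parts[1]); err != nil {
		return 0, 0, err
	}

	if lo <= 0 || hi < lo || hi > 65535 {
		return 0, 0, fmt.Errorf("range %q out of bounds", s)
	}

	return lo, hi, nil
}

func main() {
	lo, hi, err := parsePortRange("50000-51000")
	fmt.Println(lo, hi, err) // 50000 51000 <nil>
}
```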
@@ -1,17 +1,20 @@
package cmds

import (
	"github.com/urfave/cli/v2"
	"github.com/spf13/cobra"
)

func NewClusterCmd(appCtx *AppContext) *cli.Command {
	return &cli.Command{
		Name:  "cluster",
		Usage: "cluster command",
		Subcommands: []*cli.Command{
			NewClusterCreateCmd(appCtx),
			NewClusterDeleteCmd(appCtx),
			NewClusterListCmd(appCtx),
		},
func NewClusterCmd(appCtx *AppContext) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "cluster",
		Short: "cluster command",
	}

	cmd.AddCommand(
		NewClusterCreateCmd(appCtx),
		NewClusterDeleteCmd(appCtx),
		NewClusterListCmd(appCtx),
	)

	return cmd
}
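This is the shape of the whole urfave/cli v2 to cobra migration: parent commands stop carrying a `Subcommands` slice and instead call `AddCommand` on a built `*cobra.Command`. A minimal, self-contained sketch of the pattern (the `AppContext` here is trimmed to an empty struct; the real one carries the client and namespace flags):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// AppContext stands in for the CLI's shared state.
type AppContext struct{}

func newClusterCmd(appCtx *AppContext) *cobra.Command {
	cmd := &cobra.Command{Use: "cluster", Short: "cluster command"}

	// Subcommands are attached instead of declared inline.
	cmd.AddCommand(&cobra.Command{
		Use:  "list",
		Args: cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Fprintln(cmd.OutOrStdout(), "no clusters")
			return nil
		},
	})

	return cmd
}

func main() {
	root := &cobra.Command{Use: "k3kcli"}
	root.AddCommand(newClusterCmd(&AppContext{}))

	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}
```

Note how argument checks move from manual `NArg()` tests inside the action into declarative `Args: cobra.ExactArgs(1)` / `cobra.NoArgs` constraints, which is exactly what the diffs below do.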
@@ -3,22 +3,28 @@ package cmds

import (
	"context"
	"errors"
	"fmt"
	"net/url"
	"os"
	"strings"
	"time"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
	"github.com/rancher/k3k/pkg/controller/kubeconfig"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/retry"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/util/retry"
	"k8s.io/utils/ptr"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
	"github.com/rancher/k3k/pkg/controller/kubeconfig"
)

type CreateConfig struct {
@@ -27,10 +33,10 @@ type CreateConfig struct {
	serviceCIDR string
	servers int
	agents int
	serverArgs cli.StringSlice
	agentArgs cli.StringSlice
	serverEnvs cli.StringSlice
	agentEnvs cli.StringSlice
	serverArgs []string
	agentArgs []string
	serverEnvs []string
	agentEnvs []string
	persistenceType string
	storageClassName string
	storageRequestSize string
@@ -38,35 +44,36 @@ type CreateConfig struct {
	mode string
	kubeconfigServerHost string
	policy string
	mirrorHostNodes bool
	customCertsPath string
}

func NewClusterCreateCmd(appCtx *AppContext) *cli.Command {
func NewClusterCreateCmd(appCtx *AppContext) *cobra.Command {
	createConfig := &CreateConfig{}

	flags := CommonFlags(appCtx)
	flags = append(flags, FlagNamespace(appCtx))
	flags = append(flags, newCreateFlags(createConfig)...)

	return &cli.Command{
		Name: "create",
		Usage: "Create new cluster",
		UsageText: "k3kcli cluster create [command options] NAME",
		Action: createAction(appCtx, createConfig),
		Flags: flags,
		HideHelpCommand: true,
	cmd := &cobra.Command{
		Use: "create",
		Short: "Create new cluster",
		Example: "k3kcli cluster create [command options] NAME",
		PreRunE: func(cmd *cobra.Command, args []string) error {
			return validateCreateConfig(createConfig)
		},
		RunE: createAction(appCtx, createConfig),
		Args: cobra.ExactArgs(1),
	}

	CobraFlagNamespace(appCtx, cmd.Flags())
	createFlags(cmd, createConfig)

	return cmd
}

func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
	return func(clx *cli.Context) error {
func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()
		client := appCtx.Client
		name := args[0]

		if clx.NArg() != 1 {
			return cli.ShowSubcommandHelp(clx)
		}

		name := clx.Args().First()
		if name == k3kcluster.ClusterInvalidName {
			return errors.New("invalid cluster name")
		}
@@ -83,7 +90,7 @@ func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {

		if strings.Contains(config.version, "+") {
			orig := config.version
			config.version = strings.Replace(config.version, "+", "-", -1)
			config.version = strings.ReplaceAll(config.version, "+", "-")
			logrus.Warnf("Invalid K3s docker reference version: '%s'. Using '%s' instead", orig, config.version)
		}
@@ -97,6 +104,12 @@ func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
			}
		}

		if config.customCertsPath != "" {
			if err := CreateCustomCertsSecrets(ctx, name, namespace, config.customCertsPath, client); err != nil {
				return err
			}
		}

		logrus.Infof("Creating cluster [%s] in namespace [%s]", name, namespace)

		cluster := newCluster(name, namespace, config)
@@ -126,9 +139,13 @@ func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
			}
		}

		logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)
		logrus.Infof("Waiting for cluster to be available..")

		logrus.Infof("waiting for cluster to be available..")
		if err := waitForCluster(ctx, client, cluster); err != nil {
			return fmt.Errorf("failed to wait for cluster to become ready (status: %s): %w", cluster.Status.Phase, err)
		}

		logrus.Infof("Extracting Kubeconfig for [%s] cluster", name)

		// retry every 5s for at most 2m, or 25 times
		availableBackoff := wait.Backoff{
@@ -142,13 +159,13 @@ func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc {
		var kubeconfig *clientcmdapi.Config

		if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error {
			kubeconfig, err = cfg.Generate(ctx, client, cluster, host[0])
			kubeconfig, err = cfg.Generate(ctx, client, cluster, host[0], 0)
			return err
		}); err != nil {
			return err
		}

		return writeKubeconfigFile(cluster, kubeconfig)
		return writeKubeconfigFile(cluster, kubeconfig, "")
	}
}
@@ -167,10 +184,10 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
			Agents: ptr.To(int32(config.agents)),
			ClusterCIDR: config.clusterCIDR,
			ServiceCIDR: config.serviceCIDR,
			ServerArgs: config.serverArgs.Value(),
			AgentArgs: config.agentArgs.Value(),
			ServerEnvs: env(config.serverEnvs.Value()),
			AgentEnvs: env(config.agentEnvs.Value()),
			ServerArgs: config.serverArgs,
			AgentArgs: config.agentArgs,
			ServerEnvs: env(config.serverEnvs),
			AgentEnvs: env(config.agentEnvs),
			Version: config.version,
			Mode: v1alpha1.ClusterMode(config.mode),
			Persistence: v1alpha1.PersistenceConfig{
@@ -178,6 +195,7 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
				StorageClassName: ptr.To(config.storageClassName),
				StorageRequestSize: config.storageRequestSize,
			},
			MirrorHostNodes: config.mirrorHostNodes,
		},
	}
	if config.storageClassName == "" {
@@ -191,6 +209,32 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
		}
	}

	if config.customCertsPath != "" {
		cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
			Enabled: true,
			Sources: v1alpha1.CredentialSources{
				ClientCA: v1alpha1.CredentialSource{
					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "client-ca"),
				},
				ServerCA: v1alpha1.CredentialSource{
					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "server-ca"),
				},
				ETCDServerCA: v1alpha1.CredentialSource{
					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-server-ca"),
				},
				ETCDPeerCA: v1alpha1.CredentialSource{
					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-peer-ca"),
				},
				RequestHeaderCA: v1alpha1.CredentialSource{
					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "request-header-ca"),
				},
				ServiceAccountToken: v1alpha1.CredentialSource{
					SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "service-account-token"),
				},
			},
		}
	}

	return cluster
}
@@ -211,3 +255,89 @@ func env(envSlice []string) []v1.EnvVar {

	return envVars
}

func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1alpha1.Cluster) error {
	interval := 5 * time.Second
	timeout := 2 * time.Minute

	return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
		key := client.ObjectKeyFromObject(cluster)
		if err := k8sClient.Get(ctx, key, cluster); err != nil {
			return false, fmt.Errorf("failed to get resource: %w", err)
		}

		// If resource ready -> stop polling
		if cluster.Status.Phase == v1alpha1.ClusterReady {
			return true, nil
		}

		// If resource failed -> stop polling with an error
		if cluster.Status.Phase == v1alpha1.ClusterFailed {
			return true, fmt.Errorf("cluster creation failed: %s", cluster.Status.Phase)
		}

		// Condition not met, continue polling.
		return false, nil
	})
}

func CreateCustomCertsSecrets(ctx context.Context, name, namespace, customCertsPath string, k8sclient client.Client) error {
	customCAsMap := map[string]string{
		"etcd-peer-ca": "/etcd/peer-ca",
		"etcd-server-ca": "/etcd/server-ca",
		"server-ca": "/server-ca",
		"client-ca": "/client-ca",
		"request-header-ca": "/request-header-ca",
		"service-account-token": "/service",
	}

	for certName, fileName := range customCAsMap {
		var (
			certFilePath, keyFilePath string
			cert, key []byte
			err error
		)

		if certName != "service-account-token" {
			certFilePath = customCertsPath + fileName + ".crt"

			cert, err = os.ReadFile(certFilePath)
			if err != nil {
				return err
			}
		}

		keyFilePath = customCertsPath + fileName + ".key"

		key, err = os.ReadFile(keyFilePath)
		if err != nil {
			return err
		}

		certSecret := caCertSecret(certName, name, namespace, cert, key)

		if err := k8sclient.Create(ctx, certSecret); err != nil {
			return client.IgnoreAlreadyExists(err)
		}
	}

	return nil
}

func caCertSecret(certName, clusterName, clusterNamespace string, cert, key []byte) *v1.Secret {
	return &v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind: "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: controller.SafeConcatNameWithPrefix(clusterName, certName),
			Namespace: clusterNamespace,
		},
		Type: v1.SecretTypeTLS,
		Data: map[string][]byte{
			v1.TLSCertKey: cert,
			v1.TLSPrivateKeyKey: key,
		},
	}
}
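`waitForCluster` is built on `wait.PollUntilContextTimeout` from apimachinery. A standalone sketch of that helper's semantics, since all three return combinations are used above: `(true, nil)` stops with success, `(true, err)` stops with that error, `(false, nil)` keeps polling until the timeout.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	attempts := 0

	// Poll every 100ms, give up after 1s; the `true` argument means the
	// condition runs immediately instead of after the first interval.
	err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, time.Second, true,
		func(ctx context.Context) (done bool, err error) {
			attempts++
			return attempts == 3, nil // succeed on the third attempt
		})

	fmt.Println(attempts, err) // 3 <nil>
}
```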
@@ -3,123 +3,59 @@ package cmds

import (
	"errors"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/urfave/cli/v2"
	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

func newCreateFlags(config *CreateConfig) []cli.Flag {
	return []cli.Flag{
		&cli.IntFlag{
			Name: "servers",
			Usage: "number of servers",
			Destination: &config.servers,
			Value: 1,
			Action: func(ctx *cli.Context, value int) error {
				if value <= 0 {
					return errors.New("invalid number of servers")
				}
				return nil
			},
		},
		&cli.IntFlag{
			Name: "agents",
			Usage: "number of agents",
			Destination: &config.agents,
		},
		&cli.StringFlag{
			Name: "token",
			Usage: "token of the cluster",
			Destination: &config.token,
		},
		&cli.StringFlag{
			Name: "cluster-cidr",
			Usage: "cluster CIDR",
			Destination: &config.clusterCIDR,
		},
		&cli.StringFlag{
			Name: "service-cidr",
			Usage: "service CIDR",
			Destination: &config.serviceCIDR,
		},
		&cli.StringFlag{
			Name: "persistence-type",
			Usage: "persistence mode for the nodes (dynamic, ephemeral, static)",
			Value: string(v1alpha1.DynamicPersistenceMode),
			Destination: &config.persistenceType,
			Action: func(ctx *cli.Context, value string) error {
				switch v1alpha1.PersistenceMode(value) {
				case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
					return nil
				default:
					return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
				}
			},
		},
		&cli.StringFlag{
			Name: "storage-class-name",
			Usage: "storage class name for dynamic persistence type",
			Destination: &config.storageClassName,
		},
		&cli.StringFlag{
			Name: "storage-request-size",
			Usage: "storage size for dynamic persistence type",
			Destination: &config.storageRequestSize,
			Action: func(ctx *cli.Context, value string) error {
				if _, err := resource.ParseQuantity(value); err != nil {
					return errors.New(`invalid storage size, should be a valid resource quantity e.g "10Gi"`)
				}
				return nil
			},
		},
		&cli.StringSliceFlag{
			Name: "server-args",
			Usage: "servers extra arguments",
			Destination: &config.serverArgs,
		},
		&cli.StringSliceFlag{
			Name: "agent-args",
			Usage: "agents extra arguments",
			Destination: &config.agentArgs,
		},
		&cli.StringSliceFlag{
			Name: "server-envs",
			Usage: "servers extra Envs",
			Destination: &config.serverEnvs,
		},
		&cli.StringSliceFlag{
			Name: "agent-envs",
			Usage: "agents extra Envs",
			Destination: &config.agentEnvs,
		},
		&cli.StringFlag{
			Name: "version",
			Usage: "k3s version",
			Destination: &config.version,
		},
		&cli.StringFlag{
			Name: "mode",
			Usage: "k3k mode type (shared, virtual)",
			Destination: &config.mode,
			Value: "shared",
			Action: func(ctx *cli.Context, value string) error {
				switch value {
				case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
					return nil
				default:
					return errors.New(`mode should be one of "shared" or "virtual"`)
				}
			},
		},
		&cli.StringFlag{
			Name: "kubeconfig-server",
			Usage: "override the kubeconfig server host",
			Destination: &config.kubeconfigServerHost,
		},
		&cli.StringFlag{
			Name: "policy",
			Usage: "The policy to create the cluster in",
			Destination: &config.policy,
		},
	}
func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
	cmd.Flags().IntVar(&cfg.servers, "servers", 1, "number of servers")
	cmd.Flags().IntVar(&cfg.agents, "agents", 0, "number of agents")
	cmd.Flags().StringVar(&cfg.token, "token", "", "token of the cluster")
	cmd.Flags().StringVar(&cfg.clusterCIDR, "cluster-cidr", "", "cluster CIDR")
	cmd.Flags().StringVar(&cfg.serviceCIDR, "service-cidr", "", "service CIDR")
	cmd.Flags().BoolVar(&cfg.mirrorHostNodes, "mirror-host-nodes", false, "Mirror Host Cluster Nodes")
	cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1alpha1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
	cmd.Flags().StringVar(&cfg.storageClassName, "storage-class-name", "", "storage class name for dynamic persistence type")
	cmd.Flags().StringVar(&cfg.storageRequestSize, "storage-request-size", "", "storage size for dynamic persistence type")
	cmd.Flags().StringSliceVar(&cfg.serverArgs, "server-args", []string{}, "servers extra arguments")
	cmd.Flags().StringSliceVar(&cfg.agentArgs, "agent-args", []string{}, "agents extra arguments")
	cmd.Flags().StringSliceVar(&cfg.serverEnvs, "server-envs", []string{}, "servers extra Envs")
	cmd.Flags().StringSliceVar(&cfg.agentEnvs, "agent-envs", []string{}, "agents extra Envs")
	cmd.Flags().StringVar(&cfg.version, "version", "", "k3s version")
	cmd.Flags().StringVar(&cfg.mode, "mode", "shared", "k3k mode type (shared, virtual)")
	cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
	cmd.Flags().StringVar(&cfg.policy, "policy", "", "The policy to create the cluster in")
	cmd.Flags().StringVar(&cfg.customCertsPath, "custom-certs", "", "The path for custom certificate directory")
}

func validateCreateConfig(cfg *CreateConfig) error {
	if cfg.servers <= 0 {
		return errors.New("invalid number of servers")
	}

	if cfg.persistenceType != "" {
		switch v1alpha1.PersistenceMode(cfg.persistenceType) {
		case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
			return nil
		default:
			return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
		}
	}

	if _, err := resource.ParseQuantity(cfg.storageRequestSize); err != nil {
		return errors.New(`invalid storage size, should be a valid resource quantity e.g "10Gi"`)
	}

	if cfg.mode != "" {
		switch cfg.mode {
		case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
			return nil
		default:
			return errors.New(`mode should be one of "shared" or "virtual"`)
		}
	}

	return nil
}
@@ -4,52 +4,44 @@ import (
	"context"
	"errors"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
	"github.com/rancher/k3k/pkg/controller/cluster/agent"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
	"github.com/rancher/k3k/pkg/controller/cluster/agent"
)

var keepData bool

func NewClusterDeleteCmd(appCtx *AppContext) *cli.Command {
	flags := CommonFlags(appCtx)
	flags = append(flags, FlagNamespace(appCtx))
	flags = append(flags,
		&cli.BoolFlag{
			Name: "keep-data",
			Usage: "keeps persistence volumes created for the cluster after deletion",
			Destination: &keepData,
		},
	)

	return &cli.Command{
		Name: "delete",
		Usage: "Delete an existing cluster",
		UsageText: "k3kcli cluster delete [command options] NAME",
		Action: delete(appCtx),
		Flags: flags,
		HideHelpCommand: true,
func NewClusterDeleteCmd(appCtx *AppContext) *cobra.Command {
	cmd := &cobra.Command{
		Use: "delete",
		Short: "Delete an existing cluster",
		Example: "k3kcli cluster delete [command options] NAME",
		RunE: delete(appCtx),
		Args: cobra.ExactArgs(1),
	}

	CobraFlagNamespace(appCtx, cmd.Flags())
	cmd.Flags().BoolVar(&keepData, "keep-data", false, "keeps persistence volumes created for the cluster after deletion")

	return cmd
}

func delete(appCtx *AppContext) cli.ActionFunc {
	return func(clx *cli.Context) error {
func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()
		client := appCtx.Client
		name := args[0]

		if clx.NArg() != 1 {
			return cli.ShowSubcommandHelp(clx)
		}

		name := clx.Args().First()
		if name == k3kcluster.ClusterInvalidName {
			return errors.New("invalid cluster name")
		}
@@ -3,37 +3,35 @@ package cmds

import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/urfave/cli/v2"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/cli-runtime/pkg/printers"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

func NewClusterListCmd(appCtx *AppContext) *cli.Command {
	flags := CommonFlags(appCtx)
	flags = append(flags, FlagNamespace(appCtx))

	return &cli.Command{
		Name: "list",
		Usage: "List all the existing clusters",
		UsageText: "k3kcli cluster list [command options]",
		Action: list(appCtx),
		Flags: flags,
		HideHelpCommand: true,
func NewClusterListCmd(appCtx *AppContext) *cobra.Command {
	cmd := &cobra.Command{
		Use: "list",
		Short: "List all the existing clusters",
		Example: "k3kcli cluster list [command options]",
		RunE: list(appCtx),
		Args: cobra.NoArgs,
	}

	CobraFlagNamespace(appCtx, cmd.Flags())

	return cmd
}

func list(appCtx *AppContext) cli.ActionFunc {
	return func(clx *cli.Context) error {
func list(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()
		client := appCtx.Client

		if clx.NArg() > 0 {
			return cli.ShowSubcommandHelp(clx)
		}

		var clusters v1alpha1.ClusterList
		if err := client.List(ctx, &clusters, ctrlclient.InNamespace(appCtx.namespace)); err != nil {
			return err
@@ -49,6 +47,6 @@ func list(appCtx *AppContext) cli.ActionFunc {

		printer := printers.NewTablePrinter(printers.PrintOptions{WithNamespace: true})

		return printer.PrintObj(table, clx.App.Writer)
		return printer.PrintObj(table, cmd.OutOrStdout())
	}
}
@@ -8,105 +8,79 @@ import (
	"strings"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/retry"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/certs"
	"github.com/rancher/k3k/pkg/controller/kubeconfig"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/util/retry"
)

var (
type GenerateKubeconfigConfig struct {
	name string
	cn string
	org cli.StringSlice
	altNames cli.StringSlice
	expirationDays int64
	configName string
	cn string
	org []string
	altNames []string
	expirationDays int64
	kubeconfigServerHost string
)

func newGenerateKubeconfigFlags(appCtx *AppContext) []cli.Flag {
	return []cli.Flag{
		&cli.StringFlag{
			Name: "name",
			Usage: "cluster name",
			Destination: &name,
		},
		&cli.StringFlag{
			Name: "config-name",
			Usage: "the name of the generated kubeconfig file",
			Destination: &configName,
		},
		&cli.StringFlag{
			Name: "cn",
			Usage: "Common name (CN) of the generated certificates for the kubeconfig",
			Destination: &cn,
			Value: controller.AdminCommonName,
		},
		&cli.StringSliceFlag{
			Name: "org",
			Usage: "Organization name (ORG) of the generated certificates for the kubeconfig",
			Value: &org,
		},
		&cli.StringSliceFlag{
			Name: "altNames",
			Usage: "altNames of the generated certificates for the kubeconfig",
			Value: &altNames,
		},
		&cli.Int64Flag{
			Name: "expiration-days",
			Usage: "Expiration date of the certificates used for the kubeconfig",
			Destination: &expirationDays,
			Value: 356,
		},
		&cli.StringFlag{
			Name: "kubeconfig-server",
			Usage: "override the kubeconfig server host",
			Destination: &kubeconfigServerHost,
			Value: "",
		},
	}
}

func NewKubeconfigCmd(appCtx *AppContext) *cli.Command {
	return &cli.Command{
		Name: "kubeconfig",
		Usage: "Manage kubeconfig for clusters",
		Subcommands: []*cli.Command{
			NewKubeconfigGenerateCmd(appCtx),
		},
func NewKubeconfigCmd(appCtx *AppContext) *cobra.Command {
	cmd := &cobra.Command{
		Use: "kubeconfig",
		Short: "Manage kubeconfig for clusters",
	}

	cmd.AddCommand(
		NewKubeconfigGenerateCmd(appCtx),
	)

	return cmd
}

func NewKubeconfigGenerateCmd(appCtx *AppContext) *cli.Command {
	flags := CommonFlags(appCtx)
	flags = append(flags, FlagNamespace(appCtx))
	flags = append(flags, newGenerateKubeconfigFlags(appCtx)...)
func NewKubeconfigGenerateCmd(appCtx *AppContext) *cobra.Command {
	cfg := &GenerateKubeconfigConfig{}

	return &cli.Command{
		Name: "generate",
		Usage: "Generate kubeconfig for clusters",
		SkipFlagParsing: false,
		Action: generate(appCtx),
		Flags: flags,
	cmd := &cobra.Command{
		Use: "generate",
		Short: "Generate kubeconfig for clusters",
		RunE: generate(appCtx, cfg),
		Args: cobra.NoArgs,
	}

	CobraFlagNamespace(appCtx, cmd.Flags())
	generateKubeconfigFlags(cmd, cfg)

	return cmd
}

func generate(appCtx *AppContext) cli.ActionFunc {
	return func(clx *cli.Context) error {
func generateKubeconfigFlags(cmd *cobra.Command, cfg *GenerateKubeconfigConfig) {
	cmd.Flags().StringVar(&cfg.name, "name", "", "cluster name")
	cmd.Flags().StringVar(&cfg.configName, "config-name", "", "the name of the generated kubeconfig file")
	cmd.Flags().StringVar(&cfg.cn, "cn", controller.AdminCommonName, "Common name (CN) of the generated certificates for the kubeconfig")
	cmd.Flags().StringSliceVar(&cfg.org, "org", nil, "Organization name (ORG) of the generated certificates for the kubeconfig")
	cmd.Flags().StringSliceVar(&cfg.altNames, "altNames", nil, "altNames of the generated certificates for the kubeconfig")
	cmd.Flags().Int64Var(&cfg.expirationDays, "expiration-days", 365, "Expiration date of the certificates used for the kubeconfig")
	cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
}

func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()
		client := appCtx.Client

		clusterKey := types.NamespacedName{
			Name: name,
			Namespace: appCtx.Namespace(name),
			Name: cfg.name,
			Namespace: appCtx.Namespace(cfg.name),
		}

		var cluster v1alpha1.Cluster
@@ -121,25 +95,21 @@ func generate(appCtx *AppContext) cli.ActionFunc {
		}

		host := strings.Split(url.Host, ":")
		if kubeconfigServerHost != "" {
			host = []string{kubeconfigServerHost}

			if err := altNames.Set(kubeconfigServerHost); err != nil {
				return err
			}
		if cfg.kubeconfigServerHost != "" {
			host = []string{cfg.kubeconfigServerHost}
			cfg.altNames = append(cfg.altNames, cfg.kubeconfigServerHost)
		}

		certAltNames := certs.AddSANs(altNames.Value())
		certAltNames := certs.AddSANs(cfg.altNames)

		orgs := org.Value()
		if orgs == nil {
			orgs = []string{user.SystemPrivilegedGroup}
		if len(cfg.org) == 0 {
			cfg.org = []string{user.SystemPrivilegedGroup}
		}

		cfg := kubeconfig.KubeConfig{
			CN: cn,
			ORG: orgs,
			ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
		kubeCfg := kubeconfig.KubeConfig{
			CN: cfg.cn,
			ORG: cfg.org,
			ExpiryDate: time.Hour * 24 * time.Duration(cfg.expirationDays),
			AltNames: certAltNames,
		}
@@ -148,17 +118,17 @@ func generate(appCtx *AppContext) cli.ActionFunc {
		var kubeconfig *clientcmdapi.Config

		if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error {
			kubeconfig, err = cfg.Generate(ctx, client, &cluster, host[0])
			kubeconfig, err = kubeCfg.Generate(ctx, client, &cluster, host[0], 0)
			return err
		}); err != nil {
			return err
		}

		return writeKubeconfigFile(&cluster, kubeconfig)
		return writeKubeconfigFile(&cluster, kubeconfig, cfg.configName)
	}
}

func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config) error {
func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
	if configName == "" {
		configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
	}
@@ -179,5 +149,5 @@ func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Con
		return err
	}

	return os.WriteFile(configName, kubeconfigData, 0644)
	return os.WriteFile(configName, kubeconfigData, 0o644)
}
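The kubeconfig generation above retries only on NotFound errors via `retry.OnError`, which keeps retrying while the supplied predicate matches the error and the backoff has steps left. A standalone sketch of those semantics (the error and predicate here are stand-ins; the CLI passes `apierrors.IsNotFound`):

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/retry"
)

var errNotReady = errors.New("secret not found yet")

func main() {
	backoff := wait.Backoff{Steps: 5, Duration: 10 * time.Millisecond}

	calls := 0
	err := retry.OnError(backoff,
		func(err error) bool {
			// Retry only errors the predicate recognises.
			return errors.Is(err, errNotReady)
		},
		func() error {
			calls++
			if calls < 3 {
				return errNotReady
			}
			return nil
		})

	fmt.Println(calls, err) // 3 <nil>
}
```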
@@ -1,17 +1,20 @@
package cmds

import (
	"github.com/urfave/cli/v2"
	"github.com/spf13/cobra"
)

func NewPolicyCmd(appCtx *AppContext) *cli.Command {
	return &cli.Command{
		Name:  "policy",
		Usage: "policy command",
		Subcommands: []*cli.Command{
			NewPolicyCreateCmd(appCtx),
			NewPolicyDeleteCmd(appCtx),
			NewPolicyListCmd(appCtx),
		},
func NewPolicyCmd(appCtx *AppContext) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "policy",
		Short: "policy command",
	}

	cmd.AddCommand(
		NewPolicyCreateCmd(appCtx),
		NewPolicyDeleteCmd(appCtx),
		NewPolicyListCmd(appCtx),
	)

	return cmd
}
@@ -4,62 +4,52 @@ import (
	"context"
	"errors"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/policy"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli/v2"
	"github.com/spf13/cobra"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/policy"
)

type VirtualClusterPolicyCreateConfig struct {
	mode string
}

func NewPolicyCreateCmd(appCtx *AppContext) *cli.Command {
func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
	config := &VirtualClusterPolicyCreateConfig{}

	flags := CommonFlags(appCtx)
	flags = append(flags,
		&cli.StringFlag{
			Name: "mode",
			Usage: "The allowed mode type of the policy",
			Destination: &config.mode,
			Value: "shared",
			Action: func(ctx *cli.Context, value string) error {
				switch value {
				case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
					return nil
				default:
					return errors.New(`mode should be one of "shared" or "virtual"`)
				}
			},
	cmd := &cobra.Command{
		Use: "create",
		Short: "Create new policy",
		Example: "k3kcli policy create [command options] NAME",
		PreRunE: func(cmd *cobra.Command, args []string) error {
			switch config.mode {
			case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
				return nil
			default:
				return errors.New(`mode should be one of "shared" or "virtual"`)
			}
		},
	)

	return &cli.Command{
		Name: "create",
		Usage: "Create new policy",
		UsageText: "k3kcli policy create [command options] NAME",
		Action: policyCreateAction(appCtx, config),
		Flags: flags,
		HideHelpCommand: true,
		RunE: policyCreateAction(appCtx, config),
		Args: cobra.ExactArgs(1),
	}

	cmd.Flags().StringVar(&config.mode, "mode", "shared", "The allowed mode type of the policy")

	return cmd
}

func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateConfig) cli.ActionFunc {
	return func(clx *cli.Context) error {
func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateConfig) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()
		client := appCtx.Client

		if clx.NArg() != 1 {
			return cli.ShowSubcommandHelp(clx)
		}

		policyName := clx.Args().First()
		policyName := args[0]

		_, err := createPolicy(ctx, client, v1alpha1.ClusterMode(config.mode), policyName)
@@ -3,33 +3,29 @@ package cmds
import (
    "context"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    "github.com/sirupsen/logrus"
    "github.com/urfave/cli/v2"
    "github.com/spf13/cobra"

    apierrors "k8s.io/apimachinery/pkg/api/errors"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

func NewPolicyDeleteCmd(appCtx *AppContext) *cli.Command {
    return &cli.Command{
        Name:            "delete",
        Usage:           "Delete an existing policy",
        UsageText:       "k3kcli policy delete [command options] NAME",
        Action:          policyDeleteAction(appCtx),
        Flags:           CommonFlags(appCtx),
        HideHelpCommand: true,
func NewPolicyDeleteCmd(appCtx *AppContext) *cobra.Command {
    return &cobra.Command{
        Use:     "delete",
        Short:   "Delete an existing policy",
        Example: "k3kcli policy delete [command options] NAME",
        RunE:    policyDeleteAction(appCtx),
        Args:    cobra.ExactArgs(1),
    }
}

func policyDeleteAction(appCtx *AppContext) cli.ActionFunc {
    return func(clx *cli.Context) error {
func policyDeleteAction(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
    return func(cmd *cobra.Command, args []string) error {
        ctx := context.Background()
        client := appCtx.Client

        if clx.NArg() != 1 {
            return cli.ShowSubcommandHelp(clx)
        }

        name := clx.Args().First()
        name := args[0]

        policy := &v1alpha1.VirtualClusterPolicy{}
        policy.Name = name

@@ -3,33 +3,30 @@ package cmds
import (
    "context"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    "github.com/urfave/cli/v2"
    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    "github.com/spf13/cobra"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/cli-runtime/pkg/printers"

    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

func NewPolicyListCmd(appCtx *AppContext) *cli.Command {
    return &cli.Command{
        Name:            "list",
        Usage:           "List all the existing policies",
        UsageText:       "k3kcli policy list [command options]",
        Action:          policyList(appCtx),
        Flags:           CommonFlags(appCtx),
        HideHelpCommand: true,
func NewPolicyListCmd(appCtx *AppContext) *cobra.Command {
    return &cobra.Command{
        Use:     "list",
        Short:   "List all the existing policies",
        Example: "k3kcli policy list [command options]",
        RunE:    policyList(appCtx),
        Args:    cobra.NoArgs,
    }
}

func policyList(appCtx *AppContext) cli.ActionFunc {
    return func(clx *cli.Context) error {
func policyList(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
    return func(cmd *cobra.Command, args []string) error {
        ctx := context.Background()
        client := appCtx.Client

        if clx.NArg() > 0 {
            return cli.ShowSubcommandHelp(clx)
        }

        var policies v1alpha1.VirtualClusterPolicyList
        if err := client.List(ctx, &policies); err != nil {
            return err
@@ -45,6 +42,6 @@ func policyList(appCtx *AppContext) cli.ActionFunc {

        printer := printers.NewTablePrinter(printers.PrintOptions{})

        return printer.PrintObj(table, clx.App.Writer)
        return printer.PrintObj(table, cmd.OutOrStdout())
    }
}

120
cli/cmds/root.go
120
cli/cmds/root.go
@@ -2,17 +2,22 @@ package cmds

import (
    "fmt"
    "strings"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    "github.com/rancher/k3k/pkg/buildinfo"
    "github.com/sirupsen/logrus"
    "github.com/urfave/cli/v2"
    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
    "github.com/spf13/viper"
    "k8s.io/apimachinery/pkg/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    "sigs.k8s.io/controller-runtime/pkg/client"

    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    "github.com/rancher/k3k/pkg/buildinfo"
)

type AppContext struct {
@@ -25,52 +30,53 @@ type AppContext struct {
    namespace string
}

func NewApp() *cli.App {
func NewRootCmd() *cobra.Command {
    appCtx := &AppContext{}

    app := cli.NewApp()
    app.Name = "k3kcli"
    app.Usage = "CLI for K3K"
    app.Flags = CommonFlags(appCtx)
    rootCmd := &cobra.Command{
        Use:     "k3kcli",
        Short:   "CLI for K3K",
        Version: buildinfo.Version,
        PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
            InitializeConfig(cmd)

    app.Before = func(clx *cli.Context) error {
        if appCtx.Debug {
            logrus.SetLevel(logrus.DebugLevel)
        }
            if appCtx.Debug {
                logrus.SetLevel(logrus.DebugLevel)
            }

        restConfig, err := loadRESTConfig(appCtx.Kubeconfig)
        if err != nil {
            return err
        }
            restConfig, err := loadRESTConfig(appCtx.Kubeconfig)
            if err != nil {
                return err
            }

        scheme := runtime.NewScheme()
        _ = clientgoscheme.AddToScheme(scheme)
        _ = v1alpha1.AddToScheme(scheme)
        _ = apiextensionsv1.AddToScheme(scheme)
            scheme := runtime.NewScheme()
            _ = clientgoscheme.AddToScheme(scheme)
            _ = v1alpha1.AddToScheme(scheme)
            _ = apiextensionsv1.AddToScheme(scheme)

        ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
        if err != nil {
            return err
        }
            ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})
            if err != nil {
                return err
            }

        appCtx.RestConfig = restConfig
        appCtx.Client = ctrlClient
            appCtx.RestConfig = restConfig
            appCtx.Client = ctrlClient

        return nil
            return nil
        },
        DisableAutoGenTag: true,
    }

    app.Version = buildinfo.Version
    cli.VersionPrinter = func(cCtx *cli.Context) {
        fmt.Println("k3kcli Version: " + buildinfo.Version)
    }
    rootCmd.PersistentFlags().StringVar(&appCtx.Kubeconfig, "kubeconfig", "", "kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)")
    rootCmd.PersistentFlags().BoolVar(&appCtx.Debug, "debug", false, "Turn on debug logs")

    app.Commands = []*cli.Command{
    rootCmd.AddCommand(
        NewClusterCmd(appCtx),
        NewPolicyCmd(appCtx),
        NewKubeconfigCmd(appCtx),
    }
    )

    return app
    return rootCmd
}

func (ctx *AppContext) Namespace(name string) string {
@@ -94,36 +100,20 @@ func loadRESTConfig(kubeconfig string) (*rest.Config, error) {
    return kubeConfig.ClientConfig()
}

func CommonFlags(appCtx *AppContext) []cli.Flag {
    return []cli.Flag{
        FlagDebug(appCtx),
        FlagKubeconfig(appCtx),
    }
func CobraFlagNamespace(appCtx *AppContext, flag *pflag.FlagSet) {
    flag.StringVarP(&appCtx.namespace, "namespace", "n", "", "namespace of the k3k cluster")
}

func FlagDebug(appCtx *AppContext) *cli.BoolFlag {
    return &cli.BoolFlag{
        Name:        "debug",
        Usage:       "Turn on debug logs",
        Destination: &appCtx.Debug,
        EnvVars:     []string{"K3K_DEBUG"},
    }
}
func InitializeConfig(cmd *cobra.Command) {
    viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
    viper.AutomaticEnv()

func FlagKubeconfig(appCtx *AppContext) *cli.StringFlag {
    return &cli.StringFlag{
        Name:        "kubeconfig",
        Usage:       "kubeconfig path",
        Destination: &appCtx.Kubeconfig,
        DefaultText: "$HOME/.kube/config or $KUBECONFIG if set",
    }
}

func FlagNamespace(appCtx *AppContext) *cli.StringFlag {
    return &cli.StringFlag{
        Name:        "namespace",
        Usage:       "namespace of the k3k cluster",
        Aliases:     []string{"n"},
        Destination: &appCtx.namespace,
    }
    // Bind the current command's flags to viper
    cmd.Flags().VisitAll(func(f *pflag.Flag) {
        // Apply the viper config value to the flag when the flag is not set and viper has a value
        if !f.Changed && viper.IsSet(f.Name) {
            val := viper.Get(f.Name)
            _ = cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val))
        }
    })
}

@@ -1,10 +1,11 @@
package cmds

import (
    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/util/jsonpath"

    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// createTable creates a table to print from the printerColumn defined in the CRD spec, plus the name at the beginning
@@ -93,7 +94,7 @@ func buildRowCells(objMap map[string]any, printerColumns []apiextensionsv1.Custo
}

func toPointerSlice[T any](v []T) []*T {
    var vPtr = make([]*T, len(v))
    vPtr := make([]*T, len(v))

    for i := range v {
        vPtr[i] = &v[i]

@@ -1,15 +1,14 @@
package main

import (
    "os"
    "github.com/sirupsen/logrus"

    "github.com/rancher/k3k/cli/cmds"
    "github.com/sirupsen/logrus"
)

func main() {
    app := cmds.NewApp()
    if err := app.Run(os.Args); err != nil {
    app := cmds.NewRootCmd()
    if err := app.Execute(); err != nil {
        logrus.Fatal(err)
    }
}

@@ -1,165 +0,0 @@
# NAME

k3kcli - CLI for K3K

# SYNOPSIS

k3kcli

```
[--debug]
[--kubeconfig]=[value]
```

**Usage**:

```
k3kcli [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
```

# GLOBAL OPTIONS

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

# COMMANDS

## cluster

cluster command

### create

Create new cluster

>k3kcli cluster create [command options] NAME

**--agent-args**="": agents extra arguments

**--agent-envs**="": agents extra Envs

**--agents**="": number of agents (default: 0)

**--cluster-cidr**="": cluster CIDR

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--kubeconfig-server**="": override the kubeconfig server host

**--mode**="": k3k mode type (shared, virtual) (default: "shared")

**--namespace, -n**="": namespace of the k3k cluster

**--persistence-type**="": persistence mode for the nodes (dynamic, ephemeral, static) (default: "dynamic")

**--policy**="": The policy to create the cluster in

**--server-args**="": servers extra arguments

**--server-envs**="": servers extra Envs

**--servers**="": number of servers (default: 1)

**--service-cidr**="": service CIDR

**--storage-class-name**="": storage class name for dynamic persistence type

**--storage-request-size**="": storage size for dynamic persistence type

**--token**="": token of the cluster

**--version**="": k3s version

### delete

Delete an existing cluster

>k3kcli cluster delete [command options] NAME

**--debug**: Turn on debug logs

**--keep-data**: keeps persistence volumes created for the cluster after deletion

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--namespace, -n**="": namespace of the k3k cluster

### list

List all the existing cluster

>k3kcli cluster list [command options]

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--namespace, -n**="": namespace of the k3k cluster

## policy

policy command

### create

Create new policy

>k3kcli policy create [command options] NAME

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--mode**="": The allowed mode type of the policy (default: "shared")

### delete

Delete an existing policy

>k3kcli policy delete [command options] NAME

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

### list

List all the existing policies

>k3kcli policy list [command options]

**--debug**: Turn on debug logs

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

## kubeconfig

Manage kubeconfig for clusters

### generate

Generate kubeconfig for clusters

**--altNames**="": altNames of the generated certificates for the kubeconfig

**--cn**="": Common name (CN) of the generated certificates for the kubeconfig (default: "system:admin")

**--config-name**="": the name of the generated kubeconfig file

**--debug**: Turn on debug logs

**--expiration-days**="": Expiration date of the certificates used for the kubeconfig (default: 356)

**--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set)

**--kubeconfig-server**="": override the kubeconfig server host

**--name**="": cluster name

**--namespace, -n**="": namespace of the k3k cluster

**--org**="": Organization name (ORG) of the generated certificates for the kubeconfig
@@ -5,19 +5,14 @@ import (
    "os"
    "path"

    "github.com/spf13/cobra/doc"

    "github.com/rancher/k3k/cli/cmds"
)

func main() {
    // Instantiate the CLI application
    app := cmds.NewApp()

    // Generate the Markdown documentation
    md, err := app.ToMarkdown()
    if err != nil {
        fmt.Println("Error generating documentation:", err)
        os.Exit(1)
    }
    k3kcli := cmds.NewRootCmd()

    wd, err := os.Getwd()
    if err != nil {
@@ -25,13 +20,12 @@ func main() {
        os.Exit(1)
    }

    outputFile := path.Join(wd, "docs/cli/cli-docs.md")
    outputDir := path.Join(wd, "docs/cli")

    err = os.WriteFile(outputFile, []byte(md), 0644)
    if err != nil {
    if err := doc.GenMarkdownTree(k3kcli, outputDir); err != nil {
        fmt.Println("Error generating documentation:", err)
        os.Exit(1)
    }

    fmt.Println("Documentation generated at " + outputFile)
    fmt.Println("Documentation generated at " + outputDir)
}

18
docs/cli/k3kcli.md
Normal file
18
docs/cli/k3kcli.md
Normal file
@@ -0,0 +1,18 @@
## k3kcli

CLI for K3K

### Options

```
      --debug               Turn on debug logs
  -h, --help                help for k3kcli
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli cluster](k3kcli_cluster.md) - cluster command
* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters
* [k3kcli policy](k3kcli_policy.md) - policy command

24
docs/cli/k3kcli_cluster.md
Normal file
24
docs/cli/k3kcli_cluster.md
Normal file
@@ -0,0 +1,24 @@
## k3kcli cluster

cluster command

### Options

```
  -h, --help   help for cluster
```

### Options inherited from parent commands

```
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli cluster create](k3kcli_cluster_create.md) - Create new cluster
* [k3kcli cluster delete](k3kcli_cluster_delete.md) - Delete an existing cluster
* [k3kcli cluster list](k3kcli_cluster_list.md) - List all the existing clusters

50
docs/cli/k3kcli_cluster_create.md
Normal file
50
docs/cli/k3kcli_cluster_create.md
Normal file
@@ -0,0 +1,50 @@
## k3kcli cluster create

Create new cluster

```
k3kcli cluster create [flags]
```

### Examples

```
k3kcli cluster create [command options] NAME
```

### Options

```
      --agent-args strings            agents extra arguments
      --agent-envs strings            agents extra Envs
      --agents int                    number of agents
      --cluster-cidr string           cluster CIDR
      --custom-certs string           The path for custom certificate directory
  -h, --help                          help for create
      --kubeconfig-server string      override the kubeconfig server host
      --mirror-host-nodes             Mirror Host Cluster Nodes
      --mode string                   k3k mode type (shared, virtual) (default "shared")
  -n, --namespace string              namespace of the k3k cluster
      --persistence-type string       persistence mode for the nodes (dynamic, ephemeral, static) (default "dynamic")
      --policy string                 The policy to create the cluster in
      --server-args strings           servers extra arguments
      --server-envs strings           servers extra Envs
      --servers int                   number of servers (default 1)
      --service-cidr string           service CIDR
      --storage-class-name string     storage class name for dynamic persistence type
      --storage-request-size string   storage size for dynamic persistence type
      --token string                  token of the cluster
      --version string                k3s version
```

### Options inherited from parent commands

```
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli cluster](k3kcli_cluster.md) - cluster command

33
docs/cli/k3kcli_cluster_delete.md
Normal file
33
docs/cli/k3kcli_cluster_delete.md
Normal file
@@ -0,0 +1,33 @@
## k3kcli cluster delete

Delete an existing cluster

```
k3kcli cluster delete [flags]
```

### Examples

```
k3kcli cluster delete [command options] NAME
```

### Options

```
  -h, --help               help for delete
      --keep-data          keeps persistence volumes created for the cluster after deletion
  -n, --namespace string   namespace of the k3k cluster
```

### Options inherited from parent commands

```
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli cluster](k3kcli_cluster.md) - cluster command

32
docs/cli/k3kcli_cluster_list.md
Normal file
32
docs/cli/k3kcli_cluster_list.md
Normal file
@@ -0,0 +1,32 @@
## k3kcli cluster list

List all the existing clusters

```
k3kcli cluster list [flags]
```

### Examples

```
k3kcli cluster list [command options]
```

### Options

```
  -h, --help               help for list
  -n, --namespace string   namespace of the k3k cluster
```

### Options inherited from parent commands

```
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli cluster](k3kcli_cluster.md) - cluster command

22
docs/cli/k3kcli_kubeconfig.md
Normal file
22
docs/cli/k3kcli_kubeconfig.md
Normal file
@@ -0,0 +1,22 @@
## k3kcli kubeconfig

Manage kubeconfig for clusters

### Options

```
  -h, --help   help for kubeconfig
```

### Options inherited from parent commands

```
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli kubeconfig generate](k3kcli_kubeconfig_generate.md) - Generate kubeconfig for clusters

33
docs/cli/k3kcli_kubeconfig_generate.md
Normal file
33
docs/cli/k3kcli_kubeconfig_generate.md
Normal file
@@ -0,0 +1,33 @@
## k3kcli kubeconfig generate

Generate kubeconfig for clusters

```
k3kcli kubeconfig generate [flags]
```

### Options

```
      --altNames strings           altNames of the generated certificates for the kubeconfig
      --cn string                  Common name (CN) of the generated certificates for the kubeconfig (default "system:admin")
      --config-name string         the name of the generated kubeconfig file
      --expiration-days int        Expiration date of the certificates used for the kubeconfig (default 365)
  -h, --help                       help for generate
      --kubeconfig-server string   override the kubeconfig server host
      --name string                cluster name
  -n, --namespace string           namespace of the k3k cluster
      --org strings                Organization name (ORG) of the generated certificates for the kubeconfig
```

### Options inherited from parent commands

```
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters

24
docs/cli/k3kcli_policy.md
Normal file
24
docs/cli/k3kcli_policy.md
Normal file
@@ -0,0 +1,24 @@
## k3kcli policy

policy command

### Options

```
  -h, --help   help for policy
```

### Options inherited from parent commands

```
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli](k3kcli.md) - CLI for K3K
* [k3kcli policy create](k3kcli_policy_create.md) - Create new policy
* [k3kcli policy delete](k3kcli_policy_delete.md) - Delete an existing policy
* [k3kcli policy list](k3kcli_policy_list.md) - List all the existing policies

32
docs/cli/k3kcli_policy_create.md
Normal file
32
docs/cli/k3kcli_policy_create.md
Normal file
@@ -0,0 +1,32 @@
## k3kcli policy create

Create new policy

```
k3kcli policy create [flags]
```

### Examples

```
k3kcli policy create [command options] NAME
```

### Options

```
  -h, --help          help for create
      --mode string   The allowed mode type of the policy (default "shared")
```

### Options inherited from parent commands

```
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli policy](k3kcli_policy.md) - policy command

31
docs/cli/k3kcli_policy_delete.md
Normal file
31
docs/cli/k3kcli_policy_delete.md
Normal file
@@ -0,0 +1,31 @@
## k3kcli policy delete

Delete an existing policy

```
k3kcli policy delete [flags]
```

### Examples

```
k3kcli policy delete [command options] NAME
```

### Options

```
  -h, --help   help for delete
```

### Options inherited from parent commands

```
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli policy](k3kcli_policy.md) - policy command

31
docs/cli/k3kcli_policy_list.md
Normal file
31
docs/cli/k3kcli_policy_list.md
Normal file
@@ -0,0 +1,31 @@
## k3kcli policy list

List all the existing policies

```
k3kcli policy list [flags]
```

### Examples

```
k3kcli policy list [command options]
```

### Options

```
  -h, --help   help for list
```

### Options inherited from parent commands

```
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
```

### SEE ALSO

* [k3kcli policy](k3kcli_policy.md) - policy command

@@ -86,6 +86,19 @@ _Appears in:_

#### ClusterPhase

_Underlying type:_ _string_

ClusterPhase is a high-level summary of the cluster's current lifecycle state.

_Appears in:_
- [ClusterStatus](#clusterstatus)

#### ClusterSpec

@@ -106,7 +119,7 @@ _Appears in:_
| `clusterCIDR` _string_ | ClusterCIDR is the CIDR range for pod IPs.<br />Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode.<br />This field is immutable. | | |
| `serviceCIDR` _string_ | ServiceCIDR is the CIDR range for service IPs.<br />Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode.<br />This field is immutable. | | |
| `clusterDNS` _string_ | ClusterDNS is the IP address for the CoreDNS service.<br />Must be within the ServiceCIDR range. Defaults to 10.43.0.10.<br />This field is immutable. | | |
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence specifies options for persisting etcd data.<br />Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.<br />A default StorageClass is required for dynamic persistence. | \{ type:dynamic \} | |
| `persistence` _[PersistenceConfig](#persistenceconfig)_ | Persistence specifies options for persisting etcd data.<br />Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.<br />A default StorageClass is required for dynamic persistence. | | |
| `expose` _[ExposeConfig](#exposeconfig)_ | Expose specifies options for exposing the API server.<br />By default, it's only exposed as a ClusterIP. | | |
| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector specifies node labels to constrain where server/agent pods are scheduled.<br />In "shared" mode, this also applies to workloads. | | |
| `priorityClass` _string_ | PriorityClass specifies the priorityClassName for server/agent pods.<br />In "shared" mode, this also applies to workloads. | | |
@@ -119,10 +132,68 @@ _Appears in:_
| `addons` _[Addon](#addon) array_ | Addons specifies secrets containing raw YAML to deploy on cluster startup. | | |
| `serverLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | ServerLimit specifies resource limits for server nodes. | | |
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
| `mirrorHostNodes` _boolean_ | MirrorHostNodes controls whether node objects from the host cluster<br />are mirrored into the virtual cluster. | | |
| `customCAs` _[CustomCAs](#customcas)_ | CustomCAs specifies the cert/key pairs for custom CA certificates. | | |

#### CredentialSource

CredentialSource defines where to get a credential from.
It can represent either a TLS key pair or a single private key.

_Appears in:_
- [CredentialSources](#credentialsources)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `secretName` _string_ | SecretName specifies the name of an existing secret to use.<br />The controller expects specific keys inside based on the credential type:<br />- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.<br />- For ServiceAccountTokenKey: 'tls.key'. | | |

#### CredentialSources

CredentialSources lists all the required credentials, including both
TLS key pairs and single signing keys.

_Appears in:_
- [CustomCAs](#customcas)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `serverCA` _[CredentialSource](#credentialsource)_ | ServerCA specifies the server-ca cert/key pair. | | |
| `clientCA` _[CredentialSource](#credentialsource)_ | ClientCA specifies the client-ca cert/key pair. | | |
| `requestHeaderCA` _[CredentialSource](#credentialsource)_ | RequestHeaderCA specifies the request-header-ca cert/key pair. | | |
| `etcdServerCA` _[CredentialSource](#credentialsource)_ | ETCDServerCA specifies the etcd-server-ca cert/key pair. | | |
| `etcdPeerCA` _[CredentialSource](#credentialsource)_ | ETCDPeerCA specifies the etcd-peer-ca cert/key pair. | | |
| `serviceAccountToken` _[CredentialSource](#credentialsource)_ | ServiceAccountToken specifies the service-account-token key. | | |

#### CustomCAs

CustomCAs specifies the cert/key pairs for custom CA certificates.

_Appears in:_
- [ClusterSpec](#clusterspec)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `enabled` _boolean_ | Enabled toggles this feature on or off. | | |
| `sources` _[CredentialSources](#credentialsources)_ | Sources defines the sources for all required custom CA certificates. | | |
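
To make the shape concrete, here is a minimal sketch of a `Cluster` manifest that wires in custom CAs. The secret names are hypothetical, and each secret is assumed to carry the keys described above (`tls.crt`/`tls.key` for TLS pairs, `tls.key` alone for the service-account key):

```yaml
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: example
  namespace: k3k-example
spec:
  customCAs:
    enabled: true
    sources:
      serverCA:
        secretName: example-server-ca      # hypothetical secret with tls.crt/tls.key
      clientCA:
        secretName: example-client-ca      # hypothetical secret with tls.crt/tls.key
      serviceAccountToken:
        secretName: example-sa-token-key   # hypothetical secret with tls.key only
```
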
#### ExposeConfig

@@ -202,13 +273,12 @@ PersistenceConfig specifies options for persisting etcd data.

_Appears in:_
- [ClusterSpec](#clusterspec)
- [ClusterStatus](#clusterstatus)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 1G | |
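
For orientation, a short sketch of how these fields appear inside a `Cluster` spec; the storage class name is an assumption and must match a StorageClass that exists on the host cluster:

```yaml
spec:
  persistence:
    type: dynamic                  # default; persists etcd data in a PVC
    storageClassName: local-path   # assumption: any StorageClass on the host
    storageRequestSize: 2G         # overrides the 1G default
```
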
#### PersistenceMode

@@ -41,6 +41,7 @@ To see all the available Make commands you can run `make help`, i.e:
test                      Run all the tests
test-unit                 Run the unit tests (skips the e2e)
test-controller           Run the controller tests (pkg/controller)
test-kubelet-controller   Run the kubelet controller tests
test-e2e                  Run the e2e tests
generate                  Generate the CRDs specs
docs                      Build the CRDs and CLI docs
147
docs/howtos/troubleshooting.md
Normal file
147
docs/howtos/troubleshooting.md
Normal file
@@ -0,0 +1,147 @@
# Troubleshooting

This guide walks through common troubleshooting steps for working with K3K virtual clusters.

---

## `too many open files` error

The `k3k-kubelet` or `k3kcluster-server-*` pods run into the following issue:

```sh
E0604 13:14:53.369369 1 leaderelection.go:336] error initially creating leader election record: Post "https://k3k-http-proxy-k3kcluster-service/apis/coordination.k8s.io/v1/namespaces/kube-system/leases": context canceled
{"level":"fatal","timestamp":"2025-06-04T13:14:53.369Z","logger":"k3k-kubelet","msg":"virtual manager stopped","error":"too many open files"}
```

This typically indicates a low limit on inotify watchers or file descriptors on the host system.

To increase the inotify limits, connect to the host nodes and run:

```sh
sudo sysctl -w fs.inotify.max_user_watches=2099999999
sudo sysctl -w fs.inotify.max_user_instances=2099999999
sudo sysctl -w fs.inotify.max_queued_events=2099999999
```

You can persist these settings by adding them to `/etc/sysctl.conf`:

```sh
fs.inotify.max_user_watches=2099999999
fs.inotify.max_user_instances=2099999999
fs.inotify.max_queued_events=2099999999
```

Apply the changes:

```sh
sudo sysctl -p
```

You can find more details in this [KB document](https://www.suse.com/support/kb/doc/?id=000020048).

---

## Inspect Controller Logs for Failure Diagnosis

To view logs from the K3k controller:

```sh
kubectl logs -n k3k-system -l app.kubernetes.io/name=k3k
```

This retrieves logs from K3k controller components.

---

## Inspect Cluster Logs for Failure Diagnosis

To view logs for a failed virtual cluster:

```sh
kubectl logs -n <cluster_namespace> -l cluster=<cluster_name>
```

This retrieves logs from K3k cluster components (`agents`, `server`, and `virtual-kubelet`).

> 💡 You can also use `kubectl describe cluster <cluster_name>` to check for recent events and status conditions.

---

## Virtual Cluster Not Starting or Stuck in Pending

The most common causes are missing prerequisites or misconfiguration.

### Storage class not available

When creating a Virtual Cluster with `dynamic` persistence, a PVC is created. You can check whether the PVC was created but not bound with `kubectl get pvc -n <cluster_namespace>`. If you see a pending PVC, you probably don't have a default storage class defined, or you specified a non-existent one.

#### Example with wrong storage class

The `pvc` is pending:

```bash
kubectl get pvc -n k3k-test-storage
NAME                                         STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS    VOLUMEATTRIBUTESCLASS   AGE
varlibrancherk3s-k3k-test-storage-server-0   Pending                                      not-available   <unset>                 4s
```

The `server` is pending:

```bash
kubectl get po -n k3k-test-storage
NAME                             READY   STATUS    RESTARTS   AGE
k3k-test-storage-kubelet-j4zn5   1/1     Running   0          54s
k3k-test-storage-server-0        0/1     Pending   0          54s
```

To fix this, use a valid storage class. You can list the existing storage classes with:

```bash
kubectl get storageclasses.storage.k8s.io
NAME                   PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
local-path (default)   rancher.io/local-path   Delete          WaitForFirstConsumer   false                  3d6h
```
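
Then point the cluster's persistence at it. A minimal sketch, assuming the `local-path` class from the listing above; adjust the name to your environment:

```yaml
spec:
  persistence:
    type: dynamic
    storageClassName: local-path   # must match an existing StorageClass
```
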
### Wrong node selector

When creating a Virtual Cluster with `defaultNodeSelector`, if the selector doesn't match any node, all pods will stay pending.

#### Example

The `server` is pending:

```bash
kubectl get po
NAME                                  READY   STATUS    RESTARTS   AGE
k3k-k3kcluster-node-placed-server-0   0/1     Pending   0          58s
```

The description of the pod provides the reason:

```bash
kubectl describe po k3k-k3kcluster-node-placed-server-0
...
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  84s   default-scheduler  0/1 nodes are available: 1 node(s) didn't match Pod's node affinity/selector. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.
```

To fix this, use a valid node affinity/selector.
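
For reference, the selector lives in the cluster spec. A sketch with a hypothetical label; pick labels that actually exist on your host nodes (check with `kubectl get nodes --show-labels`):

```yaml
spec:
  nodeSelector:
    kubernetes.io/os: linux   # hypothetical example label
```
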
### Image pull issues (airgapped setup)

When creating a Virtual Cluster in an air-gapped environment, images need to be available in the configured registry. You can check for `ImagePullBackOff` status when getting the pods in the virtual cluster namespace.

#### Example

The `server` is failing:

```bash
kubectl get po -n k3k-test-registry
NAME                              READY   STATUS             RESTARTS   AGE
k3k-test-registry-kubelet-r4zh5   1/1     Running            0          54s
k3k-test-registry-server-0        0/1     ImagePullBackOff   0          54s
```

To fix this, make sure the failing image is available. You can describe the failing pod to get more details.
34
go.mod
34
go.mod
@@ -17,10 +17,10 @@ require (
    github.com/onsi/gomega v1.36.0
    github.com/rancher/dynamiclistener v1.27.5
    github.com/sirupsen/logrus v1.9.3
    github.com/spf13/viper v1.20.1
    github.com/stretchr/testify v1.10.0
    github.com/testcontainers/testcontainers-go v0.35.0
    github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
    github.com/urfave/cli/v2 v2.27.5
    github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803
    go.etcd.io/etcd/api/v3 v3.5.16
    go.etcd.io/etcd/client/v3 v3.5.16
@@ -37,11 +37,20 @@ require (
    k8s.io/component-helpers v0.31.4
    k8s.io/kubectl v0.31.4
    k8s.io/kubelet v0.31.4
    k8s.io/kubernetes v1.31.4
    k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
    sigs.k8s.io/controller-runtime v0.19.4
)

require github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
require (
    github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
    github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
    github.com/pelletier/go-toml/v2 v2.2.3 // indirect
    github.com/sagikazarmark/locafero v0.7.0 // indirect
    github.com/sourcegraph/conc v0.3.0 // indirect
    github.com/spf13/afero v1.12.0 // indirect
    github.com/subosito/gotenv v1.6.0 // indirect
)

require (
    dario.cat/mergo v1.0.1 // indirect
@@ -68,7 +77,7 @@ require (
    github.com/coreos/go-semver v0.3.1 // indirect
    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
    github.com/cpuguy83/dockercfg v0.3.2 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
    github.com/cyphar/filepath-securejoin v0.3.6 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/distribution/reference v0.6.0 // indirect
@@ -85,7 +94,7 @@ require (
    github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
    github.com/fatih/color v1.13.0 // indirect
    github.com/felixge/httpsnoop v1.0.4 // indirect
    github.com/fsnotify/fsnotify v1.7.0 // indirect
    github.com/fsnotify/fsnotify v1.8.0 // indirect
    github.com/fxamacker/cbor/v2 v2.7.0 // indirect
    github.com/go-errors/errors v1.4.2 // indirect
    github.com/go-gorp/gorp/v3 v3.1.0 // indirect
@@ -165,9 +174,9 @@ require (
    github.com/shirou/gopsutil/v3 v3.23.12 // indirect
    github.com/shoenig/go-m1cpu v0.1.6 // indirect
    github.com/shopspring/decimal v1.4.0 // indirect
    github.com/spf13/cast v1.7.0 // indirect
    github.com/spf13/cobra v1.8.1 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    github.com/spf13/cast v1.7.1 // indirect
    github.com/spf13/cobra v1.9.1
    github.com/spf13/pflag v1.0.6
    github.com/stoewer/go-strcase v1.3.0 // indirect
    github.com/tklauser/go-sysconf v0.3.12 // indirect
    github.com/tklauser/numcpus v0.6.1 // indirect
@@ -176,13 +185,12 @@ require (
    github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
    github.com/xeipuuv/gojsonschema v1.2.0 // indirect
    github.com/xlab/treeprint v1.2.0 // indirect
    github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
    github.com/yusufpapurcu/wmi v1.2.3 // indirect
    go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
    go.opencensus.io v0.24.0 // indirect
    go.opentelemetry.io/auto/sdk v1.1.0 // indirect
    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
    go.opentelemetry.io/otel v1.33.0 // indirect
    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
@@ -202,9 +210,9 @@ require (
    golang.org/x/time v0.9.0 // indirect
    golang.org/x/tools v0.26.0 // indirect
    gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
    google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
    google.golang.org/grpc v1.65.0 // indirect
    google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
    google.golang.org/grpc v1.67.3 // indirect
    google.golang.org/protobuf v1.36.6 // indirect
    gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect

65
go.sum
65
go.sum
@@ -82,9 +82,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -138,8 +137,8 @@ github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6
github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
@@ -167,6 +166,8 @@ github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpv
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -352,6 +353,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
@@ -385,6 +388,8 @@ github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmi
github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
@@ -399,12 +404,18 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -424,6 +435,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/testcontainers/testcontainers-go v0.35.0 h1:uADsZpTKFAtp8SLK+hMwSaa+X+JiERHtd4sQAFmXeMo=
github.com/testcontainers/testcontainers-go v0.35.0/go.mod h1:oEVBj5zrfJTrgjwONs1SsRbnBtH9OKl+IGl3UMcr2B4=
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0 h1:zEfdO1Dz7sA2jNpf1PVCOI6FND1t/mDpaeDCguaLRXw=
@@ -434,8 +447,6 @@ github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+F
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803 h1:0O149bxUoQL69b4+pcGaCbKk2bvA/43AhkczkDuRjMc=
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803/go.mod h1:SHfH2bqArcMTBh/JejdbtsyZwmYYqkpJnABOyipjT54=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@@ -452,8 +463,6 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
@@ -485,10 +494,10 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
@@ -638,19 +647,19 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic=
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
||||
google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8=
|
||||
google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -717,6 +726,8 @@ k8s.io/kubectl v0.31.4 h1:c8Af8xd1VjyoKyWMW0xHv2+tYxEjne8s6OOziMmaD10=
|
||||
k8s.io/kubectl v0.31.4/go.mod h1:0E0rpXg40Q57wRE6LB9su+4tmwx1IzZrmIEvhQPk0i4=
|
||||
k8s.io/kubelet v0.31.4 h1:6TokbMv+HnFG7Oe9tVS/J0VPGdC4GnsQZXuZoo7Ixi8=
|
||||
k8s.io/kubelet v0.31.4/go.mod h1:8ZM5LZyANoVxUtmayUxD/nsl+6GjREo7kSanv8AoL4U=
|
||||
k8s.io/kubernetes v1.31.4 h1:VQDX52gTQnq8C/jCo48AQuDsWbWMh9XXxhQRDYjgakw=
|
||||
k8s.io/kubernetes v1.31.4/go.mod h1:9xmT2buyTYj8TRKwRae7FcuY8k5+xlxv7VivvO0KKfs=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo=
|
||||
|
||||
@@ -2,73 +2,22 @@ package main

import (
    "errors"
    "os"

    "gopkg.in/yaml.v2"
)

// config has all virtual-kubelet startup options
type config struct {
    ClusterName       string `yaml:"clusterName,omitempty"`
    ClusterNamespace  string `yaml:"clusterNamespace,omitempty"`
    ServiceName       string `yaml:"serviceName,omitempty"`
    Token             string `yaml:"token,omitempty"`
    AgentHostname     string `yaml:"agentHostname,omitempty"`
    HostConfigPath    string `yaml:"hostConfigPath,omitempty"`
    VirtualConfigPath string `yaml:"virtualConfigPath,omitempty"`
    KubeletPort       string `yaml:"kubeletPort,omitempty"`
    ServerIP          string `yaml:"serverIP,omitempty"`
    Version           string `yaml:"version,omitempty"`
}

func (c *config) unmarshalYAML(data []byte) error {
    var conf config

    if err := yaml.Unmarshal(data, &conf); err != nil {
        return err
    }

    if c.ClusterName == "" {
        c.ClusterName = conf.ClusterName
    }

    if c.ClusterNamespace == "" {
        c.ClusterNamespace = conf.ClusterNamespace
    }

    if c.HostConfigPath == "" {
        c.HostConfigPath = conf.HostConfigPath
    }

    if c.VirtualConfigPath == "" {
        c.VirtualConfigPath = conf.VirtualConfigPath
    }

    if c.KubeletPort == "" {
        c.KubeletPort = conf.KubeletPort
    }

    if c.AgentHostname == "" {
        c.AgentHostname = conf.AgentHostname
    }

    if c.ServiceName == "" {
        c.ServiceName = conf.ServiceName
    }

    if c.Token == "" {
        c.Token = conf.Token
    }

    if c.ServerIP == "" {
        c.ServerIP = conf.ServerIP
    }

    if c.Version == "" {
        c.Version = conf.Version
    }

    return nil
    ClusterName      string `mapstructure:"clusterName"`
    ClusterNamespace string `mapstructure:"clusterNamespace"`
    ServiceName      string `mapstructure:"serviceName"`
    Token            string `mapstructure:"token"`
    AgentHostname    string `mapstructure:"agentHostname"`
    HostKubeconfig   string `mapstructure:"hostKubeconfig"`
    VirtKubeconfig   string `mapstructure:"virtKubeconfig"`
    KubeletPort      int    `mapstructure:"kubeletPort"`
    WebhookPort      int    `mapstructure:"webhookPort"`
    ServerIP         string `mapstructure:"serverIP"`
    Version          string `mapstructure:"version"`
    MirrorHostNodes  bool   `mapstructure:"mirrorHostNodes"`
}

func (c *config) validate() error {
@@ -86,16 +35,3 @@ func (c *config) validate() error {

    return nil
}

func (c *config) parse(path string) error {
    if _, err := os.Stat(path); os.IsNotExist(err) {
        return nil
    }

    b, err := os.ReadFile(path)
    if err != nil {
        return err
    }

    return c.unmarshalYAML(b)
}
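The rewritten config struct above swaps the hand-rolled YAML merge for `mapstructure` tags, which is what `viper.Unmarshal` decodes with. A minimal standalone sketch of that decoding (hypothetical field names and values, not the repo's wiring; mapstructure matches keys case-insensitively, so the camelCase tags also match the flatcase viper keys bound in main.go):

package main

import (
    "fmt"

    "github.com/spf13/viper"
)

type miniConfig struct {
    ClusterName string `mapstructure:"clusterName"`
    KubeletPort int    `mapstructure:"kubeletPort"`
}

func main() {
    v := viper.New()

    // In the kubelet these values would come from flags, env vars, or the
    // config file; Set is just the simplest way to demonstrate decoding.
    v.Set("clustername", "demo")
    v.Set("kubeletport", 10250)

    var c miniConfig
    if err := v.Unmarshal(&c); err != nil {
        panic(err)
    }

    fmt.Printf("%+v\n", c) // {ClusterName:demo KubeletPort:10250}
}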
@@ -5,15 +5,17 @@ import (
    "fmt"
    "sync"

    "github.com/rancher/k3k/pkg/controller"
    k3klog "github.com/rancher/k3k/pkg/log"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/util/retry"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"

    "github.com/rancher/k3k/pkg/controller"
    k3klog "github.com/rancher/k3k/pkg/log"
)

const ConfigMapSyncerName = "configmap-syncer"

@@ -125,6 +127,7 @@ func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name strin

    // lock in write mode since we are now adding the key
    c.mutex.Lock()

    if c.objs == nil {
        c.objs = sets.Set[types.NamespacedName]{}
    }

@@ -135,7 +138,6 @@ func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name strin
    _, err := c.Reconcile(ctx, reconcile.Request{
        NamespacedName: objKey,
    })

    if err != nil {
        return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
    }

@@ -164,6 +166,7 @@ func (c *ConfigMapSyncer) RemoveResource(ctx context.Context, namespace, name st
    }

    c.mutex.Lock()

    if c.objs == nil {
        c.objs = sets.Set[types.NamespacedName]{}
    }
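The syncers share a locking discipline: a read-locked membership check on the fast path, then a write lock that lazily initializes the set before inserting. A stripped-down sketch of that pattern with hypothetical names:

package main

import (
    "fmt"
    "sync"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
)

type tracker struct {
    mu   sync.RWMutex
    objs sets.Set[types.NamespacedName]
}

// isWatching mirrors the read-only fast path of AddResource.
func (t *tracker) isWatching(key types.NamespacedName) bool {
    t.mu.RLock()
    defer t.mu.RUnlock()

    return t.objs.Has(key)
}

// add takes the write lock and creates the set on first use, exactly as the
// `if c.objs == nil` guard does above.
func (t *tracker) add(key types.NamespacedName) {
    t.mu.Lock()
    defer t.mu.Unlock()

    if t.objs == nil {
        t.objs = sets.Set[types.NamespacedName]{}
    }

    t.objs.Insert(key)
}

func main() {
    var t tracker

    key := types.NamespacedName{Namespace: "default", Name: "cm"}
    if !t.isWatching(key) {
        t.add(key)
    }

    fmt.Println(t.isWatching(key)) // true
}

Two goroutines could both pass the read-locked check before either inserts; since the insert is idempotent, the worst case is a redundant reconcile, which is the trade-off the syncers accept.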
k3k-kubelet/controller/controller_suite_test.go (new file, 169 lines)
@@ -0,0 +1,169 @@
package controller_test

import (
    "context"
    "errors"
    "os"
    "path"
    "path/filepath"
    "testing"

    "github.com/go-logr/zapr"
    "go.uber.org/zap"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/envtest"

    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    ctrl "sigs.k8s.io/controller-runtime"
    metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

func TestController(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Cluster Controller Suite")
}

type TestEnv struct {
    *envtest.Environment
    k8s       *kubernetes.Clientset
    k8sClient client.Client
}

var (
    hostTestEnv *TestEnv
    hostManager ctrl.Manager
    virtTestEnv *TestEnv
    virtManager ctrl.Manager
)

var _ = BeforeSuite(func() {
    hostTestEnv = NewTestEnv()
    By("HOST testEnv running at :" + hostTestEnv.ControlPlane.APIServer.Port)

    virtTestEnv = NewTestEnv()
    By("VIRT testEnv running at :" + virtTestEnv.ControlPlane.APIServer.Port)

    ctrl.SetLogger(zapr.NewLogger(zap.NewNop()))
    ctrl.SetupSignalHandler()
})

var _ = AfterSuite(func() {
    By("tearing down the test environment")

    err := hostTestEnv.Stop()
    Expect(err).NotTo(HaveOccurred())

    err = virtTestEnv.Stop()
    Expect(err).NotTo(HaveOccurred())

    tmpKubebuilderDir := path.Join(os.TempDir(), "kubebuilder")
    err = os.RemoveAll(tmpKubebuilderDir)
    Expect(err).NotTo(HaveOccurred())
})

func NewTestEnv() *TestEnv {
    GinkgoHelper()

    binaryAssetsDirectory := os.Getenv("KUBEBUILDER_ASSETS")
    if binaryAssetsDirectory == "" {
        binaryAssetsDirectory = "/usr/local/kubebuilder/bin"
    }

    tmpKubebuilderDir := path.Join(os.TempDir(), "kubebuilder")

    if err := os.Mkdir(tmpKubebuilderDir, 0o755); !errors.Is(err, os.ErrExist) {
        Expect(err).NotTo(HaveOccurred())
    }

    tempDir, err := os.MkdirTemp(tmpKubebuilderDir, "envtest-*")
    Expect(err).NotTo(HaveOccurred())

    err = os.CopyFS(tempDir, os.DirFS(binaryAssetsDirectory))
    Expect(err).NotTo(HaveOccurred())

    By("bootstrapping test environment")

    testEnv := &envtest.Environment{
        CRDDirectoryPaths:     []string{filepath.Join("..", "..", "charts", "k3k", "crds")},
        ErrorIfCRDPathMissing: true,
        BinaryAssetsDirectory: tempDir,
        Scheme:                buildScheme(),
    }

    cfg, err := testEnv.Start()
    Expect(err).NotTo(HaveOccurred())

    k8s, err := kubernetes.NewForConfig(cfg)
    Expect(err).NotTo(HaveOccurred())

    k8sClient, err := client.New(cfg, client.Options{Scheme: testEnv.Scheme})
    Expect(err).NotTo(HaveOccurred())

    return &TestEnv{
        Environment: testEnv,
        k8s:         k8s,
        k8sClient:   k8sClient,
    }
}

func buildScheme() *runtime.Scheme {
    scheme := runtime.NewScheme()

    err := clientgoscheme.AddToScheme(scheme)
    Expect(err).NotTo(HaveOccurred())
    err = v1alpha1.AddToScheme(scheme)
    Expect(err).NotTo(HaveOccurred())

    return scheme
}

var _ = Describe("Kubelet Controller", func() {
    var (
        ctx    context.Context
        cancel context.CancelFunc
    )

    BeforeEach(func() {
        var err error
        ctx, cancel = context.WithCancel(context.Background())

        hostManager, err = ctrl.NewManager(hostTestEnv.Config, ctrl.Options{
            // disable the metrics server
            Metrics: metricsserver.Options{BindAddress: "0"},
            Scheme:  hostTestEnv.Scheme,
        })
        Expect(err).NotTo(HaveOccurred())

        virtManager, err = ctrl.NewManager(virtTestEnv.Config, ctrl.Options{
            // disable the metrics server
            Metrics: metricsserver.Options{BindAddress: "0"},
            Scheme:  virtTestEnv.Scheme,
        })
        Expect(err).NotTo(HaveOccurred())

        go func() {
            defer GinkgoRecover()
            err := hostManager.Start(ctx)
            Expect(err).NotTo(HaveOccurred(), "failed to run host manager")
        }()

        go func() {
            defer GinkgoRecover()
            err := virtManager.Start(ctx)
            Expect(err).NotTo(HaveOccurred(), "failed to run virt manager")
        }()
    })

    AfterEach(func() {
        cancel()
    })

    Describe("PriorityClass", PriorityClassTests)
})
@@ -5,15 +5,17 @@ import (
    "fmt"
    "sync"

    "github.com/rancher/k3k/k3k-kubelet/translate"
    k3klog "github.com/rancher/k3k/pkg/log"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    v1 "k8s.io/api/core/v1"
    ctrl "sigs.k8s.io/controller-runtime"

    "github.com/rancher/k3k/k3k-kubelet/translate"
    k3klog "github.com/rancher/k3k/pkg/log"
)

type ControllerHandler struct {

@@ -51,6 +53,7 @@ func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object)
    if controllers != nil {
        if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok {
            err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName())

            c.RUnlock()

            return err

@@ -101,12 +104,12 @@ func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object)
        Named(r.Name()).
        For(&v1.ConfigMap{}).
        Complete(r)

    if err != nil {
        return fmt.Errorf("unable to start configmap controller: %w", err)
    }

    c.Lock()

    if c.controllers == nil {
        c.controllers = map[schema.GroupVersionKind]updateableReconciler{}
    }
@@ -3,17 +3,19 @@ package controller
import (
    "context"

    "github.com/rancher/k3k/k3k-kubelet/translate"
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/rancher/k3k/k3k-kubelet/translate"
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

const (

@@ -3,17 +3,19 @@ package controller
import (
    "context"

    "github.com/rancher/k3k/k3k-kubelet/translate"
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/component-helpers/storage/volume"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/rancher/k3k/k3k-kubelet/translate"
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

const (
k3k-kubelet/controller/priority_class_test.go (new file, 229 lines)
@@ -0,0 +1,229 @@
package controller_test

import (
    "context"
    "fmt"
    "time"

    "sigs.k8s.io/controller-runtime/pkg/client"

    v1 "k8s.io/api/core/v1"
    schedulingv1 "k8s.io/api/scheduling/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/rancher/k3k/k3k-kubelet/controller"
    "github.com/rancher/k3k/k3k-kubelet/translate"
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

var PriorityClassTests = func() {
    var (
        namespace string
        cluster   v1alpha1.Cluster
    )

    BeforeEach(func() {
        ctx := context.Background()

        ns := v1.Namespace{
            ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
        }
        err := hostTestEnv.k8sClient.Create(ctx, &ns)
        Expect(err).NotTo(HaveOccurred())

        namespace = ns.Name

        cluster = v1alpha1.Cluster{
            ObjectMeta: metav1.ObjectMeta{
                GenerateName: "cluster-",
                Namespace:    namespace,
            },
        }
        err = hostTestEnv.k8sClient.Create(ctx, &cluster)
        Expect(err).NotTo(HaveOccurred())

        err = controller.AddPriorityClassReconciler(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
        Expect(err).NotTo(HaveOccurred())
    })

    AfterEach(func() {
        ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
        err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
        Expect(err).NotTo(HaveOccurred())
    })

    It("creates a priorityClass on the host cluster", func() {
        ctx := context.Background()

        priorityClass := &schedulingv1.PriorityClass{
            ObjectMeta: metav1.ObjectMeta{
                GenerateName: "pc-",
                Labels: map[string]string{
                    "foo": "bar",
                },
            },
            Value: 1001,
        }

        err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))

        var hostPriorityClass schedulingv1.PriorityClass
        hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)

        Eventually(func() error {
            key := client.ObjectKey{Name: hostPriorityClassName}
            return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
        }).
            WithPolling(time.Millisecond * 300).
            WithTimeout(time.Second * 10).
            Should(BeNil())

        By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))

        Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
        Expect(hostPriorityClass.Labels).To(ContainElement("bar"))

        GinkgoWriter.Printf("labels: %v\n", hostPriorityClass.Labels)
    })

    It("updates a priorityClass on the host cluster", func() {
        ctx := context.Background()

        priorityClass := &schedulingv1.PriorityClass{
            ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
            Value:      1001,
        }

        err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))

        var hostPriorityClass schedulingv1.PriorityClass
        hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)

        Eventually(func() error {
            key := client.ObjectKey{Name: hostPriorityClassName}
            return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
        }).
            WithPolling(time.Millisecond * 300).
            WithTimeout(time.Second * 10).
            Should(BeNil())

        By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))

        Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
        Expect(hostPriorityClass.Labels).NotTo(ContainElement("bar"))

        key := client.ObjectKeyFromObject(priorityClass)
        err = virtTestEnv.k8sClient.Get(ctx, key, priorityClass)
        Expect(err).NotTo(HaveOccurred())

        priorityClass.Labels = map[string]string{"foo": "bar"}

        // update virtual priorityClass
        err = virtTestEnv.k8sClient.Update(ctx, priorityClass)
        Expect(err).NotTo(HaveOccurred())
        Expect(priorityClass.Labels).To(ContainElement("bar"))

        // check hostPriorityClass
        Eventually(func() map[string]string {
            key := client.ObjectKey{Name: hostPriorityClassName}
            err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
            Expect(err).NotTo(HaveOccurred())
            return hostPriorityClass.Labels
        }).
            WithPolling(time.Millisecond * 300).
            WithTimeout(time.Second * 10).
            Should(ContainElement("bar"))
    })

    It("deletes a priorityClass on the host cluster", func() {
        ctx := context.Background()

        priorityClass := &schedulingv1.PriorityClass{
            ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
            Value:      1001,
        }

        err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))

        var hostPriorityClass schedulingv1.PriorityClass
        hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)

        Eventually(func() error {
            key := client.ObjectKey{Name: hostPriorityClassName}
            return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
        }).
            WithPolling(time.Millisecond * 300).
            WithTimeout(time.Second * 10).
            Should(BeNil())

        By(fmt.Sprintf("Created priorityClass %s in host cluster", hostPriorityClassName))

        Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))

        err = virtTestEnv.k8sClient.Delete(ctx, priorityClass)
        Expect(err).NotTo(HaveOccurred())

        Eventually(func() bool {
            key := client.ObjectKey{Name: hostPriorityClassName}
            err := hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
            return apierrors.IsNotFound(err)
        }).
            WithPolling(time.Millisecond * 300).
            WithTimeout(time.Second * 10).
            Should(BeTrue())
    })

    It("creates a priorityClass on the host cluster with the globalDefault annotation", func() {
        ctx := context.Background()

        priorityClass := &schedulingv1.PriorityClass{
            ObjectMeta:    metav1.ObjectMeta{GenerateName: "pc-"},
            Value:         1001,
            GlobalDefault: true,
        }

        err := virtTestEnv.k8sClient.Create(ctx, priorityClass)
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))

        var hostPriorityClass schedulingv1.PriorityClass
        hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)

        Eventually(func() error {
            key := client.ObjectKey{Name: hostPriorityClassName}
            return hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
        }).
            WithPolling(time.Millisecond * 300).
            WithTimeout(time.Second * 10).
            Should(BeNil())

        By(fmt.Sprintf("Created priorityClass %s in host cluster without the GlobalDefault value", hostPriorityClassName))

        Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
        Expect(hostPriorityClass.GlobalDefault).To(BeFalse())
        Expect(hostPriorityClass.Annotations[controller.PriorityClassGlobalDefaultAnnotation]).To(Equal("true"))
    })
}

func translateName(cluster v1alpha1.Cluster, namespace, name string) string {
    translator := translate.ToHostTranslator{
        ClusterName:      cluster.Name,
        ClusterNamespace: cluster.Namespace,
    }

    return translator.TranslateName(namespace, name)
}
k3k-kubelet/controller/priorityclass.go (new file, 159 lines)
@@ -0,0 +1,159 @@
package controller

import (
    "context"
    "strings"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/event"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/predicate"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    schedulingv1 "k8s.io/api/scheduling/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/rancher/k3k/k3k-kubelet/translate"
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

const (
    PriorityClassGlobalDefaultAnnotation = "priorityclass.k3k.io/globalDefault"

    priorityClassControllerName = "priorityclass-syncer-controller"
    priorityClassFinalizerName  = "priorityclass.k3k.io/finalizer"
)

type PriorityClassReconciler struct {
    clusterName      string
    clusterNamespace string

    virtualClient ctrlruntimeclient.Client
    hostClient    ctrlruntimeclient.Client
    Scheme        *runtime.Scheme
    HostScheme    *runtime.Scheme
    Translator    translate.ToHostTranslator
}

// AddPriorityClassReconciler adds a PriorityClass reconciler to k3k-kubelet
func AddPriorityClassReconciler(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
    translator := translate.ToHostTranslator{
        ClusterName:      clusterName,
        ClusterNamespace: clusterNamespace,
    }

    // initialize a new Reconciler
    reconciler := PriorityClassReconciler{
        clusterName:      clusterName,
        clusterNamespace: clusterNamespace,

        virtualClient: virtMgr.GetClient(),
        hostClient:    hostMgr.GetClient(),
        Scheme:        virtMgr.GetScheme(),
        HostScheme:    hostMgr.GetScheme(),
        Translator:    translator,
    }

    name := translator.TranslateName("", priorityClassControllerName)

    return ctrl.NewControllerManagedBy(virtMgr).
        Named(name).
        For(&schedulingv1.PriorityClass{}).
        WithEventFilter(ignoreSystemPrefixPredicate).
        Complete(&reconciler)
}

// ignoreSystemPrefixPredicate filters out resources whose names start with "system-".
var ignoreSystemPrefixPredicate = predicate.Funcs{
    UpdateFunc: func(e event.UpdateEvent) bool {
        return !strings.HasPrefix(e.ObjectOld.GetName(), "system-")
    },
    CreateFunc: func(e event.CreateEvent) bool {
        return !strings.HasPrefix(e.Object.GetName(), "system-")
    },
    DeleteFunc: func(e event.DeleteEvent) bool {
        return !strings.HasPrefix(e.Object.GetName(), "system-")
    },
    GenericFunc: func(e event.GenericEvent) bool {
        return !strings.HasPrefix(e.Object.GetName(), "system-")
    },
}

func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
    log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
    ctx = ctrl.LoggerInto(ctx, log)

    var (
        priorityClass schedulingv1.PriorityClass
        cluster       v1alpha1.Cluster
    )

    if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
        return reconcile.Result{}, err
    }

    if err := r.virtualClient.Get(ctx, req.NamespacedName, &priorityClass); err != nil {
        return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
    }

    hostPriorityClass := r.translatePriorityClass(priorityClass)

    // handle deletion
    if !priorityClass.DeletionTimestamp.IsZero() {
        // deleting the synced service if exists
        // TODO add test for previous implementation without err != nil check, and also check the other controllers
        if err := r.hostClient.Delete(ctx, hostPriorityClass); err != nil && !apierrors.IsNotFound(err) {
            return reconcile.Result{}, err
        }

        // remove the finalizer after cleaning up the synced service
        if controllerutil.RemoveFinalizer(&priorityClass, priorityClassFinalizerName) {
            if err := r.virtualClient.Update(ctx, &priorityClass); err != nil {
                return reconcile.Result{}, err
            }
        }

        return reconcile.Result{}, nil
    }

    // Add finalizer if it does not exist
    if controllerutil.AddFinalizer(&priorityClass, priorityClassFinalizerName) {
        if err := r.virtualClient.Update(ctx, &priorityClass); err != nil {
            return reconcile.Result{}, err
        }
    }

    // create the priorityClass on the host
    log.Info("creating the priorityClass for the first time on the host cluster")

    err := r.hostClient.Create(ctx, hostPriorityClass)
    if err != nil {
        if !apierrors.IsAlreadyExists(err) {
            return reconcile.Result{}, err
        }

        return reconcile.Result{}, r.hostClient.Update(ctx, hostPriorityClass)
    }

    return reconcile.Result{}, nil
}

func (r *PriorityClassReconciler) translatePriorityClass(priorityClass schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
    hostPriorityClass := priorityClass.DeepCopy()
    r.Translator.TranslateTo(hostPriorityClass)

    if hostPriorityClass.Annotations == nil {
        hostPriorityClass.Annotations = make(map[string]string)
    }

    if hostPriorityClass.GlobalDefault {
        hostPriorityClass.GlobalDefault = false
        hostPriorityClass.Annotations[PriorityClassGlobalDefaultAnnotation] = "true"
    }

    return hostPriorityClass
}
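translatePriorityClass above never lets a virtual cluster's globalDefault leak to the host; the intent is preserved as an annotation instead. The transformation in isolation (name translation via the Translator is omitted here for brevity):

package main

import (
    "fmt"

    schedulingv1 "k8s.io/api/scheduling/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Matches the PriorityClassGlobalDefaultAnnotation constant defined above.
const globalDefaultAnnotation = "priorityclass.k3k.io/globalDefault"

func translateForHost(pc *schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
    host := pc.DeepCopy()

    if host.Annotations == nil {
        host.Annotations = map[string]string{}
    }

    // The host cluster keeps its own default PriorityClass; record the
    // virtual cluster's preference as an annotation only.
    if host.GlobalDefault {
        host.GlobalDefault = false
        host.Annotations[globalDefaultAnnotation] = "true"
    }

    return host
}

func main() {
    pc := &schedulingv1.PriorityClass{
        ObjectMeta:    metav1.ObjectMeta{Name: "high"},
        Value:         1001,
        GlobalDefault: true,
    }

    host := translateForHost(pc)
    fmt.Println(host.GlobalDefault, host.Annotations[globalDefaultAnnotation]) // false true
}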
@@ -5,15 +5,17 @@ import (
    "fmt"
    "sync"

    "github.com/rancher/k3k/pkg/controller"
    k3klog "github.com/rancher/k3k/pkg/log"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/util/retry"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"

    "github.com/rancher/k3k/pkg/controller"
    k3klog "github.com/rancher/k3k/pkg/log"
)

const SecretSyncerName = "secret-syncer"

@@ -117,12 +119,15 @@ func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string)
        Namespace: namespace,
        Name:      name,
    }

    // if we already sync this object, no need to writelock/add it
    if s.isWatching(objKey) {
        return nil
    }

    // lock in write mode since we are now adding the key
    s.mutex.Lock()

    if s.objs == nil {
        s.objs = sets.Set[types.NamespacedName]{}
    }

@@ -133,7 +138,6 @@ func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string)
    _, err := s.Reconcile(ctx, reconcile.Request{
        NamespacedName: objKey,
    })

    if err != nil {
        return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
    }

@@ -162,6 +166,7 @@ func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name strin
    }

    s.mutex.Lock()

    if s.objs == nil {
        s.objs = sets.Set[types.NamespacedName]{}
    }

@@ -174,11 +179,11 @@ func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name strin

func (s *SecretSyncer) removeHostSecret(ctx context.Context, virtualNamespace, virtualName string) error {
    var vSecret corev1.Secret

    err := s.VirtualClient.Get(ctx, types.NamespacedName{
        Namespace: virtualNamespace,
        Name:      virtualName,
    }, &vSecret)

    if err != nil {
        return fmt.Errorf("unable to get virtual secret %s/%s: %w", virtualNamespace, virtualName, err)
    }
@@ -3,17 +3,19 @@ package controller
import (
    "context"

    "github.com/rancher/k3k/k3k-kubelet/translate"
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/rancher/k3k/k3k-kubelet/translate"
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

const (
@@ -7,24 +7,25 @@ import (
    "strconv"
    "strings"

    "github.com/rancher/k3k/pkg/controller/cluster/agent"
    "github.com/rancher/k3k/pkg/log"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/utils/ptr"
    "sigs.k8s.io/controller-runtime/pkg/manager"

    admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/utils/ptr"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/manager"

    "github.com/rancher/k3k/pkg/controller/cluster/agent"
    "github.com/rancher/k3k/pkg/log"
)

const (
    webhookName    = "podmutator.k3k.io"
    webhookTimeout = int32(10)
    webhookPort    = "9443"
    webhookPath    = "/mutate--v1-pod"
    FieldpathField = "k3k.io/fieldpath"
)

@@ -36,12 +37,13 @@ type webhookHandler struct {
    clusterName      string
    clusterNamespace string
    logger           *log.Logger
    webhookPort      int
}

// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to
// modify the nodeName of the created pods with the name of the virtual kubelet node name
// as well as remove any status fields of the downward apis env fields
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger) error {
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger, webhookPort int) error {
    handler := webhookHandler{
        client: mgr.GetClient(),
        scheme: mgr.GetScheme(),

@@ -49,6 +51,7 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
        serviceName:      serviceName,
        clusterName:      clusterName,
        clusterNamespace: clusterNamespace,
        webhookPort:      webhookPort,
    }

    // create mutator webhook configuration to the cluster

@@ -99,9 +102,7 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
    w.logger.Infow("extracting webhook tls from host cluster")

    var (
        webhookTLSSecret v1.Secret
    )
    var webhookTLSSecret v1.Secret

    if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil {
        return nil, err

@@ -112,7 +113,7 @@ func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlrunti
        return nil, errors.New("webhook CABundle does not exist in secret")
    }

    webhookURL := "https://" + w.serviceName + ":" + webhookPort + webhookPath
    webhookURL := fmt.Sprintf("https://%s:%d%s", w.serviceName, w.webhookPort, webhookPath)

    return &admissionregistrationv1.MutatingWebhookConfiguration{
        TypeMeta: metav1.TypeMeta{
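The URL construction change above is what lets the port become a typed value: plain string concatenation needed the `webhookPort = "9443"` string constant, while fmt.Sprintf takes the configurable int directly. A tiny self-contained check of the format string:

package main

import "fmt"

// webhookURL mirrors the fmt.Sprintf call in configuration(); the arguments
// below are illustrative defaults, not values read from a live cluster.
func webhookURL(serviceName string, port int, path string) string {
    return fmt.Sprintf("https://%s:%d%s", serviceName, port, path)
}

func main() {
    fmt.Println(webhookURL("k3k-webhook", 9443, "/mutate--v1-pod"))
    // Output: https://k3k-webhook:9443/mutate--v1-pod
}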
@@ -8,10 +8,33 @@ import (
    "fmt"
    "net"
    "net/http"
    "os"
    "time"

    "github.com/go-logr/zapr"
    "github.com/virtual-kubelet/virtual-kubelet/log"
    "github.com/virtual-kubelet/virtual-kubelet/node"
    "github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
    "go.uber.org/zap"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apiserver/pkg/authentication/user"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/util/retry"
    "sigs.k8s.io/controller-runtime/pkg/cache"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/webhook"

    certutil "github.com/rancher/dynamiclistener/cert"
    v1 "k8s.io/api/core/v1"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
    ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

    k3kkubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller"
    k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
    "github.com/rancher/k3k/k3k-kubelet/provider"

@@ -21,26 +44,6 @@ import (
    "github.com/rancher/k3k/pkg/controller/cluster/server"
    "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
    k3klog "github.com/rancher/k3k/pkg/log"
    "github.com/virtual-kubelet/virtual-kubelet/log"
    "github.com/virtual-kubelet/virtual-kubelet/node"
    "github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
    "go.uber.org/zap"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apiserver/pkg/authentication/user"
    "k8s.io/client-go/kubernetes"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    "k8s.io/client-go/util/retry"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/cache"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
    "sigs.k8s.io/controller-runtime/pkg/webhook"
)

var (

@@ -72,7 +75,7 @@ type kubelet struct {
}

func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet, error) {
    hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostConfigPath)
    hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostKubeconfig)
    if err != nil {
        return nil, err
    }

@@ -84,7 +87,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
        return nil, err
    }

    virtConfig, err := virtRestConfig(ctx, c.VirtualConfigPath, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
    virtConfig, err := virtRestConfig(ctx, c.VirtKubeconfig, hostClient, c.ClusterName, c.ClusterNamespace, c.Token, logger)
    if err != nil {
        return nil, err
    }

@@ -96,13 +99,21 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet

    ctrl.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))

    hostMetricsBindAddress := ":8083"
    virtualMetricsBindAddress := ":8084"

    if c.MirrorHostNodes {
        hostMetricsBindAddress = "0"
        virtualMetricsBindAddress = "0"
    }

    hostMgr, err := ctrl.NewManager(hostConfig, manager.Options{
        Scheme:                  baseScheme,
        LeaderElection:          true,
        LeaderElectionNamespace: c.ClusterNamespace,
        LeaderElectionID:        c.ClusterName,
        Metrics: ctrlserver.Options{
            BindAddress: ":8083",
            BindAddress: hostMetricsBindAddress,
        },
        Cache: cache.Options{
            DefaultNamespaces: map[string]cache.Config{

@@ -122,6 +133,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet

    webhookServer := webhook.NewServer(webhook.Options{
        CertDir: "/opt/rancher/k3k-webhook",
        Port:    c.WebhookPort,
    })

    virtualMgr, err := ctrl.NewManager(virtConfig, manager.Options{

@@ -131,17 +143,16 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
        LeaderElectionNamespace: "kube-system",
        LeaderElectionID:        c.ClusterName,
        Metrics: ctrlserver.Options{
            BindAddress: ":8084",
            BindAddress: virtualMetricsBindAddress,
        },
    })

    if err != nil {
        return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
    }

    logger.Info("adding pod mutator webhook")

    if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger); err != nil {
    if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
        return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
    }

@@ -163,6 +174,12 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
        return nil, errors.New("failed to add pod pvc controller: " + err.Error())
    }

    logger.Info("adding priorityclass controller")

    if err := k3kkubeletcontroller.AddPriorityClassReconciler(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
        return nil, errors.New("failed to add priorityclass controller: " + err.Error())
    }

    clusterIP, err := clusterIP(ctx, c.ServiceName, c.ClusterNamespace, hostClient)
    if err != nil {
        return nil, errors.New("failed to extract the clusterIP for the server service: " + err.Error())

@@ -195,6 +212,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
        logger: logger.Named(k3kKubeletName),
        token:  c.Token,
        dnsIP:  dnsService.Spec.ClusterIP,
        port:   c.KubeletPort,
    }, nil
}

@@ -213,9 +231,9 @@ func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostCl
    return service.Spec.ClusterIP, nil
}

func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace, name, hostname, serverIP, dnsIP, version string) error {
    providerFunc := k.newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version)
    nodeOpts := k.nodeOpts(ctx, srvPort, namespace, name, hostname, agentIP)
func (k *kubelet) registerNode(ctx context.Context, agentIP string, cfg config) error {
    providerFunc := k.newProviderFunc(cfg)
    nodeOpts := k.nodeOpts(ctx, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)

    var err error

@@ -266,22 +284,22 @@ func (k *kubelet) start(ctx context.Context) {
    k.logger.Info("node exited successfully")
}

func (k *kubelet) newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version string) nodeutil.NewProviderFunc {
func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
    return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) {
        utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, namespace, name, serverIP, dnsIP)
        utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, cfg.ClusterNamespace, cfg.ClusterName, cfg.ServerIP, k.dnsIP)
        if err != nil {
            return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
        }

        provider.ConfigureNode(k.logger, pc.Node, hostname, k.port, agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, version)
        provider.ConfigureNode(k.logger, pc.Node, cfg.AgentHostname, k.port, k.agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, cfg.Version, cfg.MirrorHostNodes)

        return utilProvider, &provider.Node{}, nil
    }
}

func (k *kubelet) nodeOpts(ctx context.Context, srvPort, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
    return func(c *nodeutil.NodeConfig) error {
        c.HTTPListenAddr = fmt.Sprintf(":%s", srvPort)
        c.HTTPListenAddr = fmt.Sprintf(":%d", srvPort)
        // set up the routes
        mux := http.NewServeMux()
        if err := nodeutil.AttachProviderRoutes(mux)(c); err != nil {

@@ -334,7 +352,6 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
        b.ClientCA.Content,
        b.ClientCAKey.Content,
    )

    if err != nil {
        return nil, err
    }

@@ -393,12 +410,13 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
    }); err != nil {
        return nil, errors.New("unable to decode bootstrap: " + err.Error())
    }

    // POD IP
    podIP := net.ParseIP(os.Getenv("POD_IP"))
    ip := net.ParseIP(agentIP)

    altNames := certutil.AltNames{
        DNSNames: []string{hostname},
        IPs:      []net.IP{ip},
        IPs:      []net.IP{ip, podIP},
    }

    cert, key, err := certs.CreateClientCertKey(nodeName, nil, &altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 0, b.ServerCA.Content, b.ServerCAKey.Content)
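loadTLSConfig now adds the pod's own IP, read from the POD_IP environment variable, to the serving certificate's SANs next to the agent IP. A hedged sketch of just the AltNames assembly (certutil is the rancher/dynamiclistener package imported above; the hostname and IPs are illustrative):

package main

import (
    "fmt"
    "net"
    "os"

    certutil "github.com/rancher/dynamiclistener/cert"
)

func main() {
    hostname := "k3k-mycluster-kubelet" // stands in for the configured agent hostname
    agentIP := net.ParseIP("10.42.0.15")

    // net.ParseIP returns nil when POD_IP is unset; the diff above relies on
    // the downward API populating it in the kubelet pod.
    podIP := net.ParseIP(os.Getenv("POD_IP"))

    altNames := certutil.AltNames{
        DNSNames: []string{hostname},
        IPs:      []net.IP{agentIP, podIP},
    }

    fmt.Printf("%+v\n", altNames)
}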
@@ -2,14 +2,21 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
|
||||
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
"github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -20,119 +27,101 @@ var (
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "k3k-kubelet"
|
||||
app.Usage = "virtual kubelet implementation k3k"
|
||||
app.Flags = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-name",
|
||||
Usage: "Name of the k3k cluster",
|
||||
Destination: &cfg.ClusterName,
|
||||
EnvVars: []string{"CLUSTER_NAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-namespace",
|
||||
Usage: "Namespace of the k3k cluster",
|
||||
Destination: &cfg.ClusterNamespace,
|
||||
EnvVars: []string{"CLUSTER_NAMESPACE"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cluster-token",
|
||||
Usage: "K3S token of the k3k cluster",
|
||||
Destination: &cfg.Token,
|
||||
EnvVars: []string{"CLUSTER_TOKEN"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "host-config-path",
|
||||
Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config",
|
||||
Destination: &cfg.HostConfigPath,
|
||||
EnvVars: []string{"HOST_KUBECONFIG"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "virtual-config-path",
|
||||
Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster",
|
||||
Destination: &cfg.VirtualConfigPath,
|
||||
EnvVars: []string{"CLUSTER_NAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "kubelet-port",
|
||||
Usage: "kubelet API port number",
|
||||
Destination: &cfg.KubeletPort,
|
||||
EnvVars: []string{"SERVER_PORT"},
|
||||
Value: "10250",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "service-name",
|
||||
Usage: "The service name deployed by the k3k controller",
|
||||
Destination: &cfg.ServiceName,
|
||||
EnvVars: []string{"SERVICE_NAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "agent-hostname",
|
||||
Usage: "Agent Hostname used for TLS SAN for the kubelet server",
|
||||
Destination: &cfg.AgentHostname,
|
||||
EnvVars: []string{"AGENT_HOSTNAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "server-ip",
|
||||
Usage: "Server IP used for registering the virtual kubelet to the cluster",
|
||||
Destination: &cfg.ServerIP,
|
||||
EnvVars: []string{"SERVER_IP"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "version",
|
||||
Usage: "Version of kubernetes server",
|
||||
Destination: &cfg.Version,
|
||||
EnvVars: []string{"VERSION"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "Path to k3k-kubelet config file",
|
||||
Destination: &configFile,
|
||||
EnvVars: []string{"CONFIG_FILE"},
|
||||
Value: "/etc/rancher/k3k/config.yaml",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "Enable debug logging",
|
||||
Destination: &debug,
|
||||
EnvVars: []string{"DEBUG"},
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "k3k-kubelet",
|
||||
Short: "virtual kubelet implementation k3k",
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := InitializeConfig(cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
logger = log.New(debug)
|
||||
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
return nil
|
||||
},
|
||||
RunE: run,
|
||||
}
|
||||
app.Before = func(clx *cli.Context) error {
|
||||
logger = log.New(debug)
|
||||
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
|
||||
|
||||
return nil
|
||||
}
|
||||
app.Action = run
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ClusterName, "cluster-name", "", "Name of the k3k cluster")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ClusterNamespace, "cluster-namespace", "", "Namespace of the k3k cluster")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.Token, "token", "", "K3S token of the k3k cluster")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.HostKubeconfig, "host-kubeconfig", "", "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.VirtKubeconfig, "virt-kubeconfig", "", "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster")
|
||||
rootCmd.PersistentFlags().IntVar(&cfg.KubeletPort, "kubelet-port", 0, "kubelet API port number")
|
||||
rootCmd.PersistentFlags().IntVar(&cfg.WebhookPort, "webhook-port", 0, "Webhook port number")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ServiceName, "service-name", "", "The service name deployed by the k3k controller")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.AgentHostname, "agent-hostname", "", "Agent Hostname used for TLS SAN for the kubelet server")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.ServerIP, "server-ip", "", "Server IP used for registering the virtual kubelet to the cluster")
|
||||
rootCmd.PersistentFlags().StringVar(&cfg.Version, "version", "", "Version of kubernetes server")
|
||||
rootCmd.PersistentFlags().StringVar(&configFile, "config", "/opt/rancher/k3k/config.yaml", "Path to k3k-kubelet config file")
|
||||
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
|
||||
rootCmd.PersistentFlags().BoolVar(&cfg.MirrorHostNodes, "mirror-host-nodes", false, "Mirror real node objects from host cluster")
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func run(clx *cli.Context) error {
|
||||
func run(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
|
||||
if err := cfg.parse(configFile); err != nil {
|
||||
logger.Fatalw("failed to parse config file", "path", configFile, zap.Error(err))
|
||||
}
|
||||
|
||||
if err := cfg.validate(); err != nil {
|
||||
logger.Fatalw("failed to validate config", zap.Error(err))
|
||||
return fmt.Errorf("failed to validate config: %w", err)
|
||||
}
|
||||
|
||||
k, err := newKubelet(ctx, &cfg, logger)
|
||||
if err != nil {
|
||||
logger.Fatalw("failed to create new virtual kubelet instance", zap.Error(err))
|
||||
return fmt.Errorf("failed to create new virtual kubelet instance: %w", err)
|
||||
}
|
||||
|
||||
if err := k.registerNode(ctx, k.agentIP, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, cfg.ServerIP, k.dnsIP, cfg.Version); err != nil {
|
||||
logger.Fatalw("failed to register new node", zap.Error(err))
|
||||
if err := k.registerNode(ctx, k.agentIP, cfg); err != nil {
|
||||
return fmt.Errorf("failed to register new node: %w", err)
|
||||
}
|
||||
|
||||
k.start(ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// InitializeConfig sets up viper to read from config file, environment variables, and flags.
|
||||
// It uses a `flatcase` convention for viper keys to match the (lowercased) config file keys,
|
||||
// while flags remain in kebab-case.
|
||||
func InitializeConfig(cmd *cobra.Command) error {
|
||||
var err error
|
||||
|
||||
// Bind every cobra flag to a viper key.
|
||||
// The viper key will be the flag name with dashes removed (flatcase).
|
||||
// e.g. "cluster-name" becomes "clustername"
|
||||
cmd.Flags().VisitAll(func(f *pflag.Flag) {
|
||||
configName := strings.ReplaceAll(f.Name, "-", "")
|
||||
envName := strings.ToUpper(strings.ReplaceAll(f.Name, "-", "_"))
|
||||
|
||||
err = errors.Join(err, viper.BindPFlag(configName, f))
|
||||
err = errors.Join(err, viper.BindEnv(configName, envName))
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configFile = viper.GetString("config")
|
||||
viper.SetConfigFile(configFile)
|
||||
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
var notFoundErr viper.ConfigFileNotFoundError
|
||||
if errors.As(err, ¬FoundErr) || errors.Is(err, os.ErrNotExist) {
|
||||
return fmt.Errorf("no config file found: %w", err)
|
||||
} else {
|
||||
return fmt.Errorf("failed to read config file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Unmarshal all configuration into the global cfg struct.
|
||||
// Viper correctly handles the precedence of flags > env > config.
|
||||
if err := viper.Unmarshal(&cfg); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal config: %w", err)
|
||||
}
|
||||
// Separately get the debug flag, as it's not part of the main config struct.
|
||||
debug = viper.GetBool("debug")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
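To illustrate the flatcase binding convention described in the comments above, here is a minimal standalone sketch (a hypothetical demo program, not part of the repository): the flag --cluster-name binds to the viper key "clustername" and the CLUSTER_NAME environment variable, and an explicitly set flag wins over both the env var and the config file.

package main

import (
    "fmt"
    "strings"

    "github.com/spf13/pflag"
    "github.com/spf13/viper"
)

func main() {
    fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    fs.String("cluster-name", "", "Name of the k3k cluster")
    _ = fs.Parse([]string{"--cluster-name=from-flag"})

    fs.VisitAll(func(f *pflag.Flag) {
        key := strings.ReplaceAll(f.Name, "-", "")                   // "clustername"
        env := strings.ToUpper(strings.ReplaceAll(f.Name, "-", "_")) // "CLUSTER_NAME"
        _ = viper.BindPFlag(key, f)
        _ = viper.BindEnv(key, env)
    })

    // Prints "from-flag": an explicitly set flag takes precedence over
    // the env var and any config file value bound to the same key.
    fmt.Println(viper.GetString("clustername"))
}
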
@@ -91,14 +91,20 @@ var _ compbasemetrics.StableCollector = &resourceMetricsCollector{}

// DescribeWithStability implements compbasemetrics.StableCollector
func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *compbasemetrics.Desc) {
    ch <- nodeCPUUsageDesc
    ch <- nodeMemoryUsageDesc
    ch <- containerStartTimeDesc
    ch <- containerCPUUsageDesc
    ch <- containerMemoryUsageDesc
    ch <- podCPUUsageDesc
    ch <- podMemoryUsageDesc
    ch <- resourceScrapeResultDesc
    descs := []*compbasemetrics.Desc{
        nodeCPUUsageDesc,
        nodeMemoryUsageDesc,
        containerStartTimeDesc,
        containerCPUUsageDesc,
        containerMemoryUsageDesc,
        podCPUUsageDesc,
        podMemoryUsageDesc,
        resourceScrapeResultDesc,
    }

    for _, desc := range descs {
        ch <- desc
    }
}

// CollectWithStability implements compbasemetrics.StableCollector

@@ -4,55 +4,71 @@ import (
    "context"
    "time"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    k3klog "github.com/rancher/k3k/pkg/log"
    corev1 "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/types"
    typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    k3klog "github.com/rancher/k3k/pkg/log"
)

func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string) {
    node.Status.Conditions = nodeConditions()
    node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
    node.Status.Addresses = []v1.NodeAddress{
        {
            Type:    v1.NodeHostName,
            Address: hostname,
        },
        {
            Type:    v1.NodeInternalIP,
            Address: ip,
        },
    }

    node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
    node.Labels["kubernetes.io/os"] = "linux"

    // configure versions
    node.Status.NodeInfo.KubeletVersion = version

    updateNodeCapacityInterval := 10 * time.Second
    ticker := time.NewTicker(updateNodeCapacityInterval)

    go func() {
        for range ticker.C {
            if err := updateNodeCapacity(coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
                logger.Error("error updating node capacity", err)
            }
    }()

func ConfigureNode(logger *k3klog.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string, mirrorHostNodes bool) {
    ctx := context.Background()
    if mirrorHostNodes {
        hostNode, err := coreClient.Nodes().Get(ctx, node.Name, metav1.GetOptions{})
        if err != nil {
            logger.Fatal("error getting host node for mirroring", err)
        }

        node.Spec = *hostNode.Spec.DeepCopy()
        node.Status = *hostNode.Status.DeepCopy()
        node.Labels = hostNode.GetLabels()
        node.Annotations = hostNode.GetAnnotations()
        node.Finalizers = hostNode.GetFinalizers()
        node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
    } else {
        node.Status.Conditions = nodeConditions()
        node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
        node.Status.Addresses = []corev1.NodeAddress{
            {
                Type:    corev1.NodeHostName,
                Address: hostname,
            },
            {
                Type:    corev1.NodeInternalIP,
                Address: ip,
            },
        }

        node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
        node.Labels["kubernetes.io/os"] = "linux"

        // configure versions
        node.Status.NodeInfo.KubeletVersion = version

        updateNodeCapacityInterval := 10 * time.Second
        ticker := time.NewTicker(updateNodeCapacityInterval)

        go func() {
            for range ticker.C {
                if err := updateNodeCapacity(ctx, coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
                    logger.Error("error updating node capacity", err)
                }
            }
        }()
    }
}

// nodeConditions returns the basic conditions which mark the node as ready
func nodeConditions() []v1.NodeCondition {
    return []v1.NodeCondition{
func nodeConditions() []corev1.NodeCondition {
    return []corev1.NodeCondition{
        {
            Type:               "Ready",
            Status:             v1.ConditionTrue,
            Status:             corev1.ConditionTrue,
            LastHeartbeatTime:  metav1.Now(),
            LastTransitionTime: metav1.Now(),
            Reason:             "KubeletReady",
@@ -60,7 +76,7 @@ func nodeConditions() []v1.NodeCondition {
        },
        {
            Type:               "OutOfDisk",
            Status:             v1.ConditionFalse,
            Status:             corev1.ConditionFalse,
            LastHeartbeatTime:  metav1.Now(),
            LastTransitionTime: metav1.Now(),
            Reason:             "KubeletHasSufficientDisk",
@@ -68,7 +84,7 @@ func nodeConditions() []v1.NodeCondition {
        },
        {
            Type:               "MemoryPressure",
            Status:             v1.ConditionFalse,
            Status:             corev1.ConditionFalse,
            LastHeartbeatTime:  metav1.Now(),
            LastTransitionTime: metav1.Now(),
            Reason:             "KubeletHasSufficientMemory",
@@ -76,7 +92,7 @@ func nodeConditions() []v1.NodeCondition {
        },
        {
            Type:               "DiskPressure",
            Status:             v1.ConditionFalse,
            Status:             corev1.ConditionFalse,
            LastHeartbeatTime:  metav1.Now(),
            LastTransitionTime: metav1.Now(),
            Reason:             "KubeletHasNoDiskPressure",
@@ -84,7 +100,7 @@ func nodeConditions() []v1.NodeCondition {
        },
        {
            Type:               "NetworkUnavailable",
            Status:             v1.ConditionFalse,
            Status:             corev1.ConditionFalse,
            LastHeartbeatTime:  metav1.Now(),
            LastTransitionTime: metav1.Now(),
            Reason:             "RouteCreated",
@@ -95,9 +111,7 @@ func nodeConditions() []v1.NodeCondition {

// updateNodeCapacity will update the virtual node capacity (and the allocatable field) with the sum of all the resources in the host nodes.
// If nodeLabels are specified, only the matching nodes will be considered.
func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
    ctx := context.Background()

func updateNodeCapacity(ctx context.Context, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error {
    capacity, allocatable, err := getResourcesFromNodes(ctx, coreClient, nodeLabels)
    if err != nil {
        return err
@@ -116,7 +130,7 @@ func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client

// getResourcesFromNodes will return a sum of all the resource capacity of the host nodes, and the allocatable resources.
// If some node labels are specified, only the matching nodes will be considered.
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (v1.ResourceList, v1.ResourceList, error) {
func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (corev1.ResourceList, corev1.ResourceList, error) {
    listOpts := metav1.ListOptions{}

    if nodeLabels != nil {

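For context on what the aggregation above computes, a hedged sketch of summing host-node capacities into a single ResourceList (an illustrative helper, not the k3k implementation; the same loop shape applies to Status.Allocatable):

import (
    corev1 "k8s.io/api/core/v1"
)

// sumCapacity adds up Status.Capacity across the given host nodes.
func sumCapacity(nodes []corev1.Node) corev1.ResourceList {
    total := corev1.ResourceList{}

    for _, n := range nodes {
        for name, qty := range n.Status.Capacity {
            sum := total[name] // resource.Quantity value for this resource name
            sum.Add(qty)
            total[name] = sum
        }
    }

    return total
}
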
@@ -3,6 +3,7 @@ package provider
import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "maps"
@@ -12,7 +13,31 @@ import (
    "time"

    "github.com/google/go-cmp/cmp"
    "github.com/virtual-kubelet/virtual-kubelet/node/api"
    "github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/selection"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/portforward"
    "k8s.io/client-go/tools/remotecommand"
    "k8s.io/client-go/transport/spdy"
    "k8s.io/utils/ptr"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/manager"

    dto "github.com/prometheus/client_model/go"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
    compbasemetrics "k8s.io/component-base/metrics"
    stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"

    "github.com/rancher/k3k/k3k-kubelet/controller"
    "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
    "github.com/rancher/k3k/k3k-kubelet/provider/collectors"
@@ -20,32 +45,6 @@ import (
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    k3kcontroller "github.com/rancher/k3k/pkg/controller"
    k3klog "github.com/rancher/k3k/pkg/log"
    "github.com/virtual-kubelet/virtual-kubelet/node/api"
    "github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
    corev1 "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/selection"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes/scheme"
    cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
    stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
    "k8s.io/utils/ptr"

    "errors"

    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/portforward"
    "k8s.io/client-go/tools/remotecommand"
    "k8s.io/client-go/transport/spdy"
    compbasemetrics "k8s.io/component-base/metrics"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/manager"
)

// check at compile time if the Provider implements the nodeutil.Provider interface
@@ -67,9 +66,7 @@ type Provider struct {
    logger *k3klog.Logger
}

var (
    ErrRetryTimeout = errors.New("provider timed out")
)
var ErrRetryTimeout = errors.New("provider timed out")

func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3klog.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
    coreClient, err := cv1.NewForConfig(&hostConfig)
@@ -213,7 +210,7 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
    p.logger.Debug("GetStatsSummary")

    nodeList := &v1.NodeList{}
    nodeList := &corev1.NodeList{}
    if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
        return nil, fmt.Errorf("unable to get nodes of cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
    }
@@ -256,7 +253,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
        return nil, err
    }

    podsNameMap := make(map[string]*v1.Pod)
    podsNameMap := make(map[string]*corev1.Pod)

    for _, pod := range pods {
        hostPodName := p.Translator.TranslateName(pod.Namespace, pod.Name)
@@ -329,7 +326,6 @@ func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port
    // should send a value on stopChannel so that the PortForward is stopped. However, we only have a ReadWriteCloser
    // so more work is needed to detect a close and handle that appropriately.
    fw, err := portforward.New(dialer, []string{portAsString}, stopChannel, readyChannel, stream, stream)

    if err != nil {
        return err
    }
@@ -373,12 +369,19 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
        tPod.Spec.Hostname = k3kcontroller.SafeConcatName(pod.Name)
    }

    // if the priorityCluss for the virtual cluster is set then override the provided value
    // if the priorityClass for the virtual cluster is set then override the provided value
    // Note: the core-dns and local-path-provisioner pods are scheduled by k3s with the
    // 'system-cluster-critical' and 'system-node-critical' default priority classes.
    if cluster.Spec.PriorityClass != "" {
        tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
        tPod.Spec.Priority = nil
    if !strings.HasPrefix(tPod.Spec.PriorityClassName, "system-") {
        if tPod.Spec.PriorityClassName != "" {
            tPriorityClassName := p.Translator.TranslateName("", tPod.Spec.PriorityClassName)
            tPod.Spec.PriorityClassName = tPriorityClassName
        }

        if cluster.Spec.PriorityClass != "" {
            tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
            tPod.Spec.Priority = nil
        }
    }

    // fieldpath annotations
@@ -412,7 +415,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
}

// withRetry retries the passed function with an interval and a timeout
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *v1.Pod) error, pod *v1.Pod) error {
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev1.Pod) error, pod *corev1.Pod) error {
    const (
        interval = 2 * time.Second
        timeout  = 10 * time.Second

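The hunk above only shows the constants; a hedged sketch of how such a retry helper is typically shaped with the wait package from k8s.io/apimachinery (assumed semantics — the actual k3k implementation may differ):

// Hypothetical retry helper: polls f every interval until it succeeds
// or the timeout elapses, then surfaces ErrRetryTimeout.
func withRetrySketch(ctx context.Context, f func(context.Context, *corev1.Pod) error, pod *corev1.Pod) error {
    const (
        interval = 2 * time.Second
        timeout  = 10 * time.Second
    )

    err := wait.PollUntilContextTimeout(ctx, interval, timeout, true,
        func(ctx context.Context) (bool, error) {
            if err := f(ctx, pod); err != nil {
                return false, nil // keep retrying until the timeout
            }
            return true, nil
        })
    if err != nil {
        return errors.Join(ErrRetryTimeout, err)
    }

    return nil
}
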
@@ -570,7 +573,7 @@ func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
    return p.withRetry(ctx, p.updatePod, pod)
}

func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
    p.logger.Debugw("got a request for update pod")

    // Once scheduled, a Pod cannot update fields other than the image of its containers and init containers, and a few others
@@ -578,7 +581,7 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {

    // Update Pod in the virtual cluster

    var currentVirtualPod v1.Pod
    var currentVirtualPod corev1.Pod
    if err := p.VirtualClient.Get(ctx, client.ObjectKeyFromObject(pod), &currentVirtualPod); err != nil {
        return fmt.Errorf("unable to get pod to update from virtual cluster: %w", err)
    }
@@ -642,7 +645,7 @@ func (p *Provider) updatePod(ctx context.Context, pod *v1.Pod) error {
}

// updateContainerImages will update the images of the original containers with the new image of the container with the same name
func updateContainerImages(original, updated []v1.Container) []v1.Container {
func updateContainerImages(original, updated []corev1.Container) []corev1.Container {
    newImages := make(map[string]string)

    for _, c := range updated {

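The hunk truncates the body of this function; a hedged completion consistent with the comment and the map initialization above (a sketch, not necessarily the exact k3k code):

func updateContainerImagesSketch(original, updated []corev1.Container) []corev1.Container {
    newImages := make(map[string]string)

    // index the updated images by container name
    for _, c := range updated {
        newImages[c.Name] = c.Image
    }

    // rewrite only the image of containers whose name matches
    for i, c := range original {
        if image, ok := newImages[c.Name]; ok {
            original[i].Image = image
        }
    }

    return original
}
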
@@ -804,8 +807,8 @@ func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
    selector = selector.Add(*requirement)

    var podList corev1.PodList
    err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})

    err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
    if err != nil {
        return nil, fmt.Errorf("unable to list pods: %w", err)
    }
@@ -848,7 +851,7 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
            "svc.cluster.local",
            "cluster.local",
        },
        Options: []v1.PodDNSConfigOption{
        Options: []corev1.PodDNSConfigOption{
            {
                Name:  "ndots",
                Value: ptr.To("5"),

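For context, a hedged sketch of the pod DNS wiring this hunk is part of: pointing the translated host-side pod at the virtual cluster's DNS server and cluster-local search domains. The first search entry and the exact field layout are illustrative assumptions, not the verbatim k3k code:

// Illustrative only: route the translated pod's DNS through the virtual
// cluster's DNS service at dnsIP and search the cluster-local domains.
pod.Spec.DNSPolicy = corev1.DNSNone
pod.Spec.DNSConfig = &corev1.PodDNSConfig{
    Nameservers: []string{dnsIP},
    Searches: []string{
        podNamespace + ".svc.cluster.local", // assumed first entry
        "svc.cluster.local",
        "cluster.local",
    },
    Options: []corev1.PodDNSConfigOption{
        {Name: "ndots", Value: ptr.To("5")},
    },
}
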
@@ -931,7 +934,7 @@ func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
// configureFieldPathEnv will retrieve all annotations created by the pod mutator webhook
// to assign env fieldpaths to pods; it will also make sure to change the metadata.name and metadata.namespace to the
// assigned annotations
func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
    for _, container := range pod.Spec.EphemeralContainers {
        addFieldPathAnnotationToEnv(container.Env)
    }
@@ -951,10 +954,10 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
        return err
    }
    // re-adding these envs to the pod
    tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, v1.EnvVar{
    tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, corev1.EnvVar{
        Name: envName,
        ValueFrom: &v1.EnvVarSource{
            FieldRef: &v1.ObjectFieldSelector{
        ValueFrom: &corev1.EnvVarSource{
            FieldRef: &corev1.ObjectFieldSelector{
                FieldPath: value,
            },
        },
@@ -967,7 +970,7 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *v1.Pod) error {
    return nil
}

func addFieldPathAnnotationToEnv(envVars []v1.EnvVar) {
func addFieldPathAnnotationToEnv(envVars []corev1.EnvVar) {
    for j, envVar := range envVars {
        if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
            continue

@@ -5,7 +5,6 @@ import (
    "testing"

    corev1 "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
)

func Test_overrideEnvVars(t *testing.T) {
@@ -22,42 +21,42 @@ func Test_overrideEnvVars(t *testing.T) {
        {
            name: "orig and new are empty",
            args: args{
                orig: []v1.EnvVar{},
                new:  []v1.EnvVar{},
                orig: []corev1.EnvVar{},
                new:  []corev1.EnvVar{},
            },
            want: []v1.EnvVar{},
            want: []corev1.EnvVar{},
        },
        {
            name: "only orig is empty",
            args: args{
                orig: []v1.EnvVar{},
                new:  []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
                orig: []corev1.EnvVar{},
                new:  []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
            },
            want: []v1.EnvVar{},
            want: []corev1.EnvVar{},
        },
        {
            name: "orig has a matching element",
            args: args{
                orig: []v1.EnvVar{{Name: "FOO", Value: "old_val"}},
                new:  []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
                orig: []corev1.EnvVar{{Name: "FOO", Value: "old_val"}},
                new:  []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
            },
            want: []v1.EnvVar{{Name: "FOO", Value: "new_val"}},
            want: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
        },
        {
            name: "orig have multiple elements",
            args: args{
                orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
                new:  []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}},
                orig: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
                new:  []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}},
            },
            want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
            want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
        },
        {
            name: "orig and new have multiple elements and some not matching",
            args: args{
                orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
                new:  []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
                orig: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
                new:  []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
            },
            want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
            want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
        },
    }

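A hedged sketch of an overrideEnvVars that satisfies the test table above — values from new replace matching names in orig, and names absent from orig are never added (illustrative; the real implementation may differ):

func overrideEnvVarsSketch(orig, new []corev1.EnvVar) []corev1.EnvVar {
    newValues := make(map[string]string, len(new))
    for _, e := range new {
        newValues[e.Name] = e.Value
    }

    // only override vars that already exist in orig; never append new ones
    for i, e := range orig {
        if v, ok := newValues[e.Name]; ok {
            orig[i].Value = v
        }
    }

    return orig
}
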
@@ -5,12 +5,14 @@ import (
    "fmt"
    "strings"

    k3kcontroller "github.com/rancher/k3k/pkg/controller"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/utils/ptr"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/utils/ptr"

    k3kcontroller "github.com/rancher/k3k/pkg/controller"
)

const (
@@ -144,7 +146,8 @@ func removeKubeAccessVolume(pod *corev1.Pod) {
}

func addKubeAccessVolume(pod *corev1.Pod, hostSecretName string) {
    var tokenVolumeName = k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)
    tokenVolumeName := k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix)

    pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
        Name: tokenVolumeName,
        VolumeSource: corev1.VolumeSource{

@@ -4,8 +4,9 @@ import (
    "encoding/hex"
    "strings"

    "github.com/rancher/k3k/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/rancher/k3k/pkg/controller"
)

const (

127
main.go
@@ -5,24 +5,26 @@ import (
    "context"
    "errors"
    "fmt"
    "os"

    "github.com/go-logr/zapr"
    "github.com/spf13/cobra"
    "go.uber.org/zap"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/tools/clientcmd"
    "sigs.k8s.io/controller-runtime/pkg/manager"

    v1 "k8s.io/api/core/v1"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"

    "github.com/rancher/k3k/cli/cmds"
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    "github.com/rancher/k3k/pkg/buildinfo"
    "github.com/rancher/k3k/pkg/controller/cluster"
    "github.com/rancher/k3k/pkg/controller/cluster/agent"
    "github.com/rancher/k3k/pkg/controller/policy"
    "github.com/rancher/k3k/pkg/log"
    "github.com/urfave/cli/v2"
    "go.uber.org/zap"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/tools/clientcmd"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/manager"
)

var (
@@ -33,54 +35,11 @@ var (
    kubeconfig              string
    k3SImage                string
    k3SImagePullPolicy      string
    kubeletPortRange        string
    webhookPortRange        string
    maxConcurrentReconciles int
    debug                   bool
    logger                  *log.Logger
    flags                   = []cli.Flag{
        &cli.StringFlag{
            Name:        "kubeconfig",
            EnvVars:     []string{"KUBECONFIG"},
            Usage:       "Kubeconfig path",
            Destination: &kubeconfig,
        },
        &cli.StringFlag{
            Name:        "cluster-cidr",
            EnvVars:     []string{"CLUSTER_CIDR"},
            Usage:       "Cluster CIDR to be added to the networkpolicy",
            Destination: &clusterCIDR,
        },
        &cli.StringFlag{
            Name:        "shared-agent-image",
            EnvVars:     []string{"SHARED_AGENT_IMAGE"},
            Usage:       "K3K Virtual Kubelet image",
            Value:       "rancher/k3k:latest",
            Destination: &sharedAgentImage,
        },
        &cli.StringFlag{
            Name:        "shared-agent-pull-policy",
            EnvVars:     []string{"SHARED_AGENT_PULL_POLICY"},
            Usage:       "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never",
            Destination: &sharedAgentImagePullPolicy,
        },
        &cli.BoolFlag{
            Name:        "debug",
            EnvVars:     []string{"DEBUG"},
            Usage:       "Debug level logging",
            Destination: &debug,
        },
        &cli.StringFlag{
            Name:        "k3s-image",
            EnvVars:     []string{"K3S_IMAGE"},
            Usage:       "K3K server image",
            Value:       "rancher/k3k",
            Destination: &k3SImage,
        },
        &cli.StringFlag{
            Name:        "k3s-image-pull-policy",
            EnvVars:     []string{"K3S_IMAGE_PULL_POLICY"},
            Usage:       "K3K server image pull policy",
            Destination: &k3SImagePullPolicy,
        },
    }
)

func init() {
@@ -89,26 +48,37 @@ func init() {
}

func main() {
    app := cmds.NewApp()
    app.Flags = flags
    app.Action = run
    app.Version = buildinfo.Version
    app.Before = func(clx *cli.Context) error {
        if err := validate(); err != nil {
            return err
        }

        logger = log.New(debug)

        return nil
    rootCmd := &cobra.Command{
        Use:     "k3k",
        Short:   "k3k controller",
        Version: buildinfo.Version,
        PreRunE: func(cmd *cobra.Command, args []string) error {
            return validate()
        },
        PersistentPreRun: func(cmd *cobra.Command, args []string) {
            cmds.InitializeConfig(cmd)
            logger = log.New(debug)
        },
        RunE: run,
    }

    if err := app.Run(os.Args); err != nil {
    rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Debug level logging")
    rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig path")
    rootCmd.PersistentFlags().StringVar(&clusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
    rootCmd.PersistentFlags().StringVar(&sharedAgentImage, "shared-agent-image", "", "K3K Virtual Kubelet image")
    rootCmd.PersistentFlags().StringVar(&sharedAgentImagePullPolicy, "shared-agent-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
    rootCmd.PersistentFlags().StringVar(&kubeletPortRange, "kubelet-port-range", "50000-51000", "Port Range for k3k kubelet in shared mode")
    rootCmd.PersistentFlags().StringVar(&webhookPortRange, "webhook-port-range", "51001-52000", "Port Range for k3k kubelet webhook in shared mode")
    rootCmd.PersistentFlags().StringVar(&k3SImage, "k3s-image", "rancher/k3k", "K3K server image")
    rootCmd.PersistentFlags().StringVar(&k3SImagePullPolicy, "k3s-image-pull-policy", "", "K3K server image pull policy")
    rootCmd.PersistentFlags().IntVar(&maxConcurrentReconciles, "max-concurrent-reconciles", 50, "maximum number of concurrent reconciles")

    if err := rootCmd.Execute(); err != nil {
        logger.Fatalw("failed to run k3k controller", zap.Error(err))
    }
}

func run(clx *cli.Context) error {
func run(cmd *cobra.Command, args []string) error {
    ctx := context.Background()

    logger.Info("Starting k3k - Version: " + buildinfo.Version)
@@ -121,7 +91,6 @@ func run(clx *cli.Context) error {
    mgr, err := ctrl.NewManager(restConfig, manager.Options{
        Scheme: scheme,
    })

    if err != nil {
        return fmt.Errorf("failed to create new controller runtime manager: %v", err)
    }
@@ -130,19 +99,29 @@ func run(clx *cli.Context) error {

    logger.Info("adding cluster controller")

    if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage, k3SImagePullPolicy); err != nil {
    portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
    if err != nil {
        return err
    }

    runnable := portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), kubeletPortRange, webhookPortRange)
    if err := mgr.Add(runnable); err != nil {
        return err
    }

    if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage, k3SImagePullPolicy, maxConcurrentReconciles, portAllocator, nil); err != nil {
        return fmt.Errorf("failed to add the new cluster controller: %v", err)
    }

    logger.Info("adding etcd pod controller")

    if err := cluster.AddPodController(ctx, mgr); err != nil {
    if err := cluster.AddPodController(ctx, mgr, maxConcurrentReconciles); err != nil {
        return fmt.Errorf("failed to add the new cluster controller: %v", err)
    }

    logger.Info("adding clusterpolicy controller")

    if err := policy.Add(mgr, clusterCIDR); err != nil {
    if err := policy.Add(mgr, clusterCIDR, maxConcurrentReconciles); err != nil {
        return fmt.Errorf("failed to add the clusterpolicy controller: %v", err)
    }

@@ -1,5 +1,3 @@
package k3k

var (
    GroupName = "k3k.io"
)
var GroupName = "k3k.io"

@@ -1,10 +1,12 @@
package v1alpha1

import (
    k3k "github.com/rancher/k3k/pkg/apis/k3k.io"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    k3k "github.com/rancher/k3k/pkg/apis/k3k.io"
)

var (

@@ -11,6 +11,7 @@ import (
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.mode",name=Mode,type=string
// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string"
// +kubebuilder:printcolumn:JSONPath=".status.policyName",name=Policy,type=string

// Cluster defines a virtual Kubernetes cluster managed by k3k.
@@ -28,6 +29,7 @@ type Cluster struct {

    // Status reflects the observed state of the Cluster.
    //
    // +kubebuilder:default={}
    // +optional
    Status ClusterStatus `json:"status,omitempty"`
}
@@ -39,7 +41,7 @@ type ClusterSpec struct {
    // If not specified, the Kubernetes version of the host node will be used.
    //
    // +optional
    Version string `json:"version"`
    Version string `json:"version,omitempty"`

    // Mode specifies the cluster provisioning mode: "shared" or "virtual".
    // Defaults to "shared". This field is immutable.
@@ -95,8 +97,8 @@ type ClusterSpec struct {
    // Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence.
    // A default StorageClass is required for dynamic persistence.
    //
    // +kubebuilder:default={type: "dynamic"}
    Persistence PersistenceConfig `json:"persistence,omitempty"`
    // +optional
    Persistence PersistenceConfig `json:"persistence"`

    // Expose specifies options for exposing the API server.
    // By default, it's only exposed as a ClusterIP.
@@ -120,7 +122,7 @@ type ClusterSpec struct {
    // The Secret must have a "token" field in its data.
    //
    // +optional
    TokenSecretRef *v1.SecretReference `json:"tokenSecretRef"`
    TokenSecretRef *v1.SecretReference `json:"tokenSecretRef,omitempty"`

    // TLSSANs specifies subject alternative names for the K3s server certificate.
    //
@@ -163,6 +165,17 @@ type ClusterSpec struct {
    //
    // +optional
    WorkerLimit v1.ResourceList `json:"workerLimit,omitempty"`

    // MirrorHostNodes controls whether node objects from the host cluster
    // are mirrored into the virtual cluster.
    //
    // +optional
    MirrorHostNodes bool `json:"mirrorHostNodes,omitempty"`

    // CustomCAs specifies the cert/key pairs for custom CA certificates.
    //
    // +optional
    CustomCAs CustomCAs `json:"customCAs,omitempty"`
}

// ClusterMode is the possible provisioning mode of a Cluster.
@@ -206,7 +219,7 @@ type PersistenceConfig struct {
    // Type specifies the persistence mode.
    //
    // +kubebuilder:default="dynamic"
    Type PersistenceMode `json:"type"`
    Type PersistenceMode `json:"type,omitempty"`

    // StorageClassName is the name of the StorageClass to use for the PVC.
    // This field is only relevant in "dynamic" mode.
@@ -217,6 +230,7 @@ type PersistenceConfig struct {
    // StorageRequestSize is the requested size for the PVC.
    // This field is only relevant in "dynamic" mode.
    //
    // +kubebuilder:default="1G"
    // +optional
    StorageRequestSize string `json:"storageRequestSize,omitempty"`
}
@@ -286,6 +300,48 @@ type NodePortConfig struct {
    ETCDPort *int32 `json:"etcdPort,omitempty"`
}

// CustomCAs specifies the cert/key pairs for custom CA certificates.
type CustomCAs struct {
    // Enabled toggles this feature on or off.
    Enabled bool `json:"enabled,omitempty"`

    // Sources defines the sources for all required custom CA certificates.
    Sources CredentialSources `json:"sources,omitempty"`
}

// CredentialSources lists all the required credentials, including both
// TLS key pairs and single signing keys.
type CredentialSources struct {
    // ServerCA specifies the server-ca cert/key pair.
    ServerCA CredentialSource `json:"serverCA,omitempty"`

    // ClientCA specifies the client-ca cert/key pair.
    ClientCA CredentialSource `json:"clientCA,omitempty"`

    // RequestHeaderCA specifies the request-header-ca cert/key pair.
    RequestHeaderCA CredentialSource `json:"requestHeaderCA,omitempty"`

    // ETCDServerCA specifies the etcd-server-ca cert/key pair.
    ETCDServerCA CredentialSource `json:"etcdServerCA,omitempty"`

    // ETCDPeerCA specifies the etcd-peer-ca cert/key pair.
    ETCDPeerCA CredentialSource `json:"etcdPeerCA,omitempty"`

    // ServiceAccountToken specifies the service-account-token key.
    ServiceAccountToken CredentialSource `json:"serviceAccountToken,omitempty"`
}

// CredentialSource defines where to get a credential from.
// It can represent either a TLS key pair or a single private key.
type CredentialSource struct {
    // SecretName specifies the name of an existing secret to use.
    // The controller expects specific keys inside based on the credential type:
    // - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
    // - For ServiceAccountTokenKey: 'tls.key'.
    // +optional
    SecretName string `json:"secretName,omitempty"`
}

// ClusterStatus reflects the observed state of a Cluster.
type ClusterStatus struct {
    // HostVersion is the Kubernetes version of the host node.
@@ -313,17 +369,46 @@ type ClusterStatus struct {
    // +optional
    TLSSANs []string `json:"tlsSANs,omitempty"`

    // Persistence specifies options for persisting etcd data.
    //
    // +optional
    Persistence PersistenceConfig `json:"persistence,omitempty"`

    // PolicyName specifies the virtual cluster policy name bound to the virtual cluster.
    //
    // +optional
    PolicyName string `json:"policyName,omitempty"`

    // KubeletPort specifies the port used by k3k-kubelet in shared mode.
    //
    // +optional
    KubeletPort int `json:"kubeletPort,omitempty"`

    // WebhookPort specifies the port used by the webhook in k3k-kubelet in shared mode.
    //
    // +optional
    WebhookPort int `json:"webhookPort,omitempty"`

    // Conditions are the individual conditions for the cluster set.
    //
    // +optional
    Conditions []metav1.Condition `json:"conditions,omitempty"`

    // Phase is a high-level summary of the cluster's current lifecycle state.
    //
    // +kubebuilder:default="Unknown"
    // +kubebuilder:validation:Enum=Pending;Provisioning;Ready;Failed;Terminating;Unknown
    // +optional
    Phase ClusterPhase `json:"phase,omitempty"`
}

// ClusterPhase is a high-level summary of the cluster's current lifecycle state.
type ClusterPhase string

const (
    ClusterPending      = ClusterPhase("Pending")
    ClusterProvisioning = ClusterPhase("Provisioning")
    ClusterReady        = ClusterPhase("Ready")
    ClusterFailed       = ClusterPhase("Failed")
    ClusterTerminating  = ClusterPhase("Terminating")
    ClusterUnknown      = ClusterPhase("Unknown")
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:object:root=true

@@ -362,7 +447,6 @@ type VirtualClusterPolicy struct {

// VirtualClusterPolicySpec defines the desired state of a VirtualClusterPolicy.
type VirtualClusterPolicySpec struct {

    // Quota specifies the resource limits for clusters within a clusterpolicy.
    //
    // +optional

@@ -163,6 +163,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
            (*out)[key] = val.DeepCopy()
        }
    }
    out.CustomCAs = in.CustomCAs
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
@@ -183,7 +184,13 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    in.Persistence.DeepCopyInto(&out.Persistence)
    if in.Conditions != nil {
        in, out := &in.Conditions, &out.Conditions
        *out = make([]metav1.Condition, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
@@ -196,6 +203,58 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialSource) DeepCopyInto(out *CredentialSource) {
    *out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialSource.
func (in *CredentialSource) DeepCopy() *CredentialSource {
    if in == nil {
        return nil
    }
    out := new(CredentialSource)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialSources) DeepCopyInto(out *CredentialSources) {
    *out = *in
    out.ServerCA = in.ServerCA
    out.ClientCA = in.ClientCA
    out.RequestHeaderCA = in.RequestHeaderCA
    out.ETCDServerCA = in.ETCDServerCA
    out.ETCDPeerCA = in.ETCDPeerCA
    out.ServiceAccountToken = in.ServiceAccountToken
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialSources.
func (in *CredentialSources) DeepCopy() *CredentialSources {
    if in == nil {
        return nil
    }
    out := new(CredentialSources)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomCAs) DeepCopyInto(out *CustomCAs) {
    *out = *in
    out.Sources = in.Sources
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomCAs.
func (in *CustomCAs) DeepCopy() *CustomCAs {
    if in == nil {
        return nil
    }
    out := new(CustomCAs)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExposeConfig) DeepCopyInto(out *ExposeConfig) {
    *out = *in

@@ -4,13 +4,15 @@ import (
    "context"
    "fmt"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    "github.com/rancher/k3k/pkg/controller"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

    "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
    "github.com/rancher/k3k/pkg/controller"
)

const (

253
pkg/controller/cluster/agent/ports.go
Normal file
@@ -0,0 +1,253 @@
package agent

import (
    "context"
    "fmt"
    "os"

    "gopkg.in/yaml.v2"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/registry/core/service/portallocator"
    "sigs.k8s.io/controller-runtime/pkg/manager"

    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    ctrl "sigs.k8s.io/controller-runtime"
    ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

const (
    kubeletPortRangeConfigMapName = "k3k-kubelet-port-range"
    webhookPortRangeConfigMapName = "k3k-webhook-port-range"

    rangeKey          = "range"
    allocatedPortsKey = "allocatedPorts"
    snapshotDataKey   = "snapshotData"
)

type PortAllocator struct {
    ctrlruntimeclient.Client

    KubeletCM *v1.ConfigMap
    WebhookCM *v1.ConfigMap
}

func NewPortAllocator(ctx context.Context, client ctrlruntimeclient.Client) (*PortAllocator, error) {
    log := ctrl.LoggerFrom(ctx)
    log.Info("starting port allocator")

    portRangeConfigMapNamespace := os.Getenv("CONTROLLER_NAMESPACE")
    if portRangeConfigMapNamespace == "" {
        return nil, fmt.Errorf("failed to find k3k controller namespace")
    }

    var kubeletPortRangeCM, webhookPortRangeCM v1.ConfigMap

    kubeletPortRangeCM.Name = kubeletPortRangeConfigMapName
    kubeletPortRangeCM.Namespace = portRangeConfigMapNamespace

    webhookPortRangeCM.Name = webhookPortRangeConfigMapName
    webhookPortRangeCM.Namespace = portRangeConfigMapNamespace

    return &PortAllocator{
        Client:    client,
        KubeletCM: &kubeletPortRangeCM,
        WebhookCM: &webhookPortRangeCM,
    }, nil
}

func (a *PortAllocator) InitPortAllocatorConfig(ctx context.Context, client ctrlruntimeclient.Client, kubeletPortRange, webhookPortRange string) manager.Runnable {
    return manager.RunnableFunc(func(ctx context.Context) error {
        if err := a.getOrCreate(ctx, a.KubeletCM, kubeletPortRange); err != nil {
            return err
        }

        if err := a.getOrCreate(ctx, a.WebhookCM, webhookPortRange); err != nil {
            return err
        }

        return nil
    })
}

func (a *PortAllocator) getOrCreate(ctx context.Context, configmap *v1.ConfigMap, portRange string) error {
    nn := types.NamespacedName{
        Name:      configmap.Name,
        Namespace: configmap.Namespace,
    }

    if err := a.Get(ctx, nn, configmap); err != nil {
        if !apierrors.IsNotFound(err) {
            return err
        }

        // creating the configMap for the first time
        configmap.Data = map[string]string{
            rangeKey:          portRange,
            allocatedPortsKey: "",
        }
        configmap.BinaryData = map[string][]byte{
            snapshotDataKey: []byte(""),
        }

        if err := a.Create(ctx, configmap); err != nil {
            return fmt.Errorf("failed to create port range configmap: %w", err)
        }
    }

    return nil
}

func (a *PortAllocator) AllocateWebhookPort(ctx context.Context, clusterName, clusterNamespace string) (int, error) {
    return a.allocatePort(ctx, clusterName, clusterNamespace, a.WebhookCM)
}

func (a *PortAllocator) DeallocateWebhookPort(ctx context.Context, clusterName, clusterNamespace string, webhookPort int) error {
    return a.deallocatePort(ctx, clusterName, clusterNamespace, a.WebhookCM, webhookPort)
}

func (a *PortAllocator) AllocateKubeletPort(ctx context.Context, clusterName, clusterNamespace string) (int, error) {
    return a.allocatePort(ctx, clusterName, clusterNamespace, a.KubeletCM)
}

func (a *PortAllocator) DeallocateKubeletPort(ctx context.Context, clusterName, clusterNamespace string, kubeletPort int) error {
    return a.deallocatePort(ctx, clusterName, clusterNamespace, a.KubeletCM, kubeletPort)
}

// allocatePort will assign a port to the cluster from a port range configured for k3k
func (a *PortAllocator) allocatePort(ctx context.Context, clusterName, clusterNamespace string, configMap *v1.ConfigMap) (int, error) {
    portRange, ok := configMap.Data[rangeKey]
    if !ok {
        return 0, fmt.Errorf("port range is not initialized")
    }

    // get the configMap first to avoid conflicts
    if err := a.getOrCreate(ctx, configMap, portRange); err != nil {
        return 0, err
    }

    clusterNamespaceName := clusterNamespace + "/" + clusterName

    portsMap, err := parsePortMap(configMap.Data[allocatedPortsKey])
    if err != nil {
        return 0, err
    }

    if _, ok := portsMap[clusterNamespaceName]; ok {
        return portsMap[clusterNamespaceName], nil
    }
    // allocate a new port and save the snapshot
    snapshot := core.RangeAllocation{
        Range: configMap.Data[rangeKey],
        Data:  configMap.BinaryData[snapshotDataKey],
    }

    pa, err := portallocator.NewFromSnapshot(&snapshot)
    if err != nil {
        return 0, err
    }

    next, err := pa.AllocateNext()
    if err != nil {
        return 0, err
    }

    portsMap[clusterNamespaceName] = next

    if err := saveSnapshot(pa, &snapshot, configMap, portsMap); err != nil {
        return 0, err
    }

    if err := a.Update(ctx, configMap); err != nil {
        return 0, err
    }

    return next, nil
}

// deallocatePort will remove the port used by the cluster from the port range
func (a *PortAllocator) deallocatePort(ctx context.Context, clusterName, clusterNamespace string, configMap *v1.ConfigMap, port int) error {
    portRange, ok := configMap.Data[rangeKey]
    if !ok {
        return fmt.Errorf("port range is not initialized")
    }

    if err := a.getOrCreate(ctx, configMap, portRange); err != nil {
        return err
    }

    clusterNamespaceName := clusterNamespace + "/" + clusterName

    portsMap, err := parsePortMap(configMap.Data[allocatedPortsKey])
    if err != nil {
        return err
    }
    // check if the cluster already exists in the configMap
    if usedPort, ok := portsMap[clusterNamespaceName]; ok {
        if usedPort != port {
            return fmt.Errorf("port %d does not match used port %d for the cluster", port, usedPort)
        }

        snapshot := core.RangeAllocation{
            Range: configMap.Data[rangeKey],
            Data:  configMap.BinaryData[snapshotDataKey],
        }

        pa, err := portallocator.NewFromSnapshot(&snapshot)
        if err != nil {
            return err
        }

        if err := pa.Release(port); err != nil {
            return err
        }

        delete(portsMap, clusterNamespaceName)

        if err := saveSnapshot(pa, &snapshot, configMap, portsMap); err != nil {
            return err
        }
    }

    return a.Update(ctx, configMap)
}

// parsePortMap will convert ConfigMap data to a portMap with string keys and int values
func parsePortMap(portMapData string) (map[string]int, error) {
    portMap := make(map[string]int)
    if err := yaml.Unmarshal([]byte(portMapData), &portMap); err != nil {
        return nil, fmt.Errorf("failed to parse allocatedPorts: %w", err)
    }

    return portMap, nil
}

// serializePortMap will convert a portMap with string keys and int values to ConfigMap data
func serializePortMap(m map[string]int) (string, error) {
    out, err := yaml.Marshal(m)
    if err != nil {
        return "", fmt.Errorf("failed to serialize allocatedPorts: %w", err)
    }

    return string(out), nil
}

func saveSnapshot(portAllocator *portallocator.PortAllocator, snapshot *core.RangeAllocation, configMap *v1.ConfigMap, portsMap map[string]int) error {
    // save the new snapshot
    if err := portAllocator.Snapshot(snapshot); err != nil {
        return err
    }
    // update the configmap with the new portsMap and the new snapshot
    configMap.BinaryData[snapshotDataKey] = snapshot.Data
    configMap.Data[rangeKey] = snapshot.Range

    allocatedPortsData, err := serializePortMap(portsMap)
    if err != nil {
        return err
    }

    configMap.Data[allocatedPortsKey] = allocatedPortsData

    return nil
}

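A hedged usage sketch of the allocator above, assuming a controller-runtime client cl and CONTROLLER_NAMESPACE set in the controller's environment (the cluster name and namespace below are illustrative):

alloc, err := agent.NewPortAllocator(ctx, cl)
if err != nil {
    return err
}

// reserve a kubelet port for the cluster; the allocation is persisted in
// the k3k-kubelet-port-range ConfigMap, so it survives controller restarts
kubeletPort, err := alloc.AllocateKubeletPort(ctx, "mycluster", "k3k-mycluster")
if err != nil {
    return err
}

// release it again when the cluster is deleted
defer func() {
    _ = alloc.DeallocateKubeletPort(ctx, "mycluster", "k3k-mycluster", kubeletPort)
}()
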
@@ -8,24 +8,25 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
||||
certutil "github.com/rancher/dynamiclistener/cert"
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/certs"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
"github.com/rancher/k3k/pkg/controller/certs"
|
||||
)
|
||||
|
||||
const (
|
||||
sharedKubeletConfigPath = "/opt/rancher/k3k/config.yaml"
|
||||
SharedNodeAgentName = "kubelet"
|
||||
SharedNodeMode = "shared"
|
||||
SharedNodeAgentName = "kubelet"
|
||||
SharedNodeMode = "shared"
|
||||
)
|
||||
|
||||
type SharedAgent struct {
|
||||
@@ -34,15 +35,19 @@ type SharedAgent struct {
|
||||
image string
|
||||
imagePullPolicy string
|
||||
token string
|
||||
kubeletPort int
|
||||
webhookPort int
|
||||
}
|
||||
|
||||
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string) *SharedAgent {
|
||||
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort, webhookPort int) *SharedAgent {
|
||||
return &SharedAgent{
|
||||
Config: config,
|
||||
serviceIP: serviceIP,
|
||||
image: image,
|
||||
imagePullPolicy: imagePullPolicy,
|
||||
token: token,
|
||||
kubeletPort: kubeletPort,
|
||||
webhookPort: webhookPort,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,7 +77,7 @@ func (s *SharedAgent) ensureObject(ctx context.Context, obj ctrlruntimeclient.Ob
|
||||
}
|
||||
|
||||
func (s *SharedAgent) config(ctx context.Context) error {
|
||||
config := sharedAgentData(s.cluster, s.Name(), s.token, s.serviceIP)
|
||||
config := sharedAgentData(s.cluster, s.Name(), s.token, s.serviceIP, s.kubeletPort, s.webhookPort)
|
||||
|
||||
configSecret := &v1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -91,7 +96,7 @@ func (s *SharedAgent) config(ctx context.Context) error {
|
||||
return s.ensureObject(ctx, configSecret)
|
||||
}
|
||||
|
||||
func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string) string {
|
||||
func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
|
||||
version := cluster.Spec.Version
|
||||
if cluster.Spec.Version == "" {
|
||||
version = cluster.Status.HostVersion
|
||||
@@ -101,9 +106,12 @@ func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string) s
|
||||
clusterNamespace: %s
|
||||
serverIP: %s
|
||||
serviceName: %s
|
||||
token: %s
|
||||
version: %s`,
|
||||
cluster.Name, cluster.Namespace, ip, serviceName, token, version)
|
||||
token: %v
|
||||
mirrorHostNodes: %t
|
||||
version: %s
|
||||
webhookPort: %d
|
||||
kubeletPort: %d`,
|
||||
cluster.Name, cluster.Namespace, ip, serviceName, token, cluster.Spec.MirrorHostNodes, version, webhookPort, kubeletPort)
|
||||
}
|
||||
|
||||
func (s *SharedAgent) daemonset(ctx context.Context) error {
|
||||
@@ -140,7 +148,17 @@ func (s *SharedAgent) daemonset(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
hostNetwork := false
|
||||
dnsPolicy := v1.DNSClusterFirst
|
||||
|
||||
if s.cluster.Spec.MirrorHostNodes {
|
||||
hostNetwork = true
|
||||
dnsPolicy = v1.DNSClusterFirstWithHostNet
|
||||
}
|
||||
|
||||
return v1.PodSpec{
|
||||
HostNetwork: hostNetwork,
|
||||
DNSPolicy: dnsPolicy,
|
||||
ServiceAccountName: s.Name(),
|
||||
NodeSelector: s.cluster.Spec.NodeSelector,
|
||||
Volumes: []v1.Volume{
|
||||
@@ -189,10 +207,6 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
			Resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{},
			},
-			Args: []string{
-				"--config",
-				sharedKubeletConfigPath,
-			},
			Env: append([]v1.EnvVar{
				{
					Name: "AGENT_HOSTNAME",

@@ -203,6 +217,15 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
					},
				},
			},
+			{
+				Name: "POD_IP",
+				ValueFrom: &v1.EnvVarSource{
+					FieldRef: &v1.ObjectFieldSelector{
+						APIVersion: "v1",
+						FieldPath:  "status.podIP",
+					},
+				},
+			},
		}, s.cluster.Spec.AgentEnvs...),
		VolumeMounts: []v1.VolumeMount{
			{
@@ -217,10 +240,15 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
			},
		},
		Ports: []v1.ContainerPort{
+			{
+				Name:          "kubelet-port",
+				Protocol:      v1.ProtocolTCP,
+				ContainerPort: int32(s.kubeletPort),
+			},
			{
				Name:          "webhook-port",
				Protocol:      v1.ProtocolTCP,
-				ContainerPort: 9443,
+				ContainerPort: int32(s.webhookPort),
			},
		},
	},

@@ -249,13 +277,13 @@ func (s *SharedAgent) service(ctx context.Context) error {
			{
				Name:     "k3s-kubelet-port",
				Protocol: v1.ProtocolTCP,
-				Port:     10250,
+				Port:     int32(s.kubeletPort),
			},
			{
				Name:       "webhook-server",
				Protocol:   v1.ProtocolTCP,
-				Port:       9443,
-				TargetPort: intstr.FromInt32(9443),
+				Port:       int32(s.webhookPort),
+				TargetPort: intstr.FromInt32(int32(s.webhookPort)),
			},
		},
	},
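Context for the port changes above: with mirrorHostNodes the agent pod joins the host network, so fixed 10250/9443 ports would collide once two shared-mode clusters schedule onto the same node; both the container and the Service therefore take the per-cluster kubeletPort/webhookPort. A minimal sketch of the host-network pairing, assuming nothing beyond upstream Kubernetes semantics:

	// Sketch: pods on the host network that still need in-cluster DNS must
	// switch the DNS policy, exactly as podSpec() does above.
	spec := v1.PodSpec{
		HostNetwork: true,
		DNSPolicy:   v1.DNSClusterFirstWithHostNet,
	}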

@@ -3,10 +3,12 @@ package agent

import (
	"testing"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/stretchr/testify/assert"
	"gopkg.in/yaml.v2"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

func Test_sharedAgentData(t *testing.T) {

@@ -14,6 +16,8 @@ func Test_sharedAgentData(t *testing.T) {
		cluster     *v1alpha1.Cluster
		serviceName string
		ip          string
+		kubeletPort int
+		webhookPort int
		token       string
	}
@@ -34,6 +38,8 @@ func Test_sharedAgentData(t *testing.T) {
					Version: "v1.2.3",
				},
			},
+			kubeletPort: 10250,
+			webhookPort: 9443,
			ip:          "10.0.0.21",
			serviceName: "service-name",
			token:       "dnjklsdjnksd892389238",

@@ -45,6 +51,9 @@ func Test_sharedAgentData(t *testing.T) {
				"serviceName": "service-name",
				"token":       "dnjklsdjnksd892389238",
				"version":     "v1.2.3",
+				"mirrorHostNodes": "false",
+				"kubeletPort":     "10250",
+				"webhookPort":     "9443",
			},
		},
		{

@@ -63,6 +72,8 @@ func Test_sharedAgentData(t *testing.T) {
				},
			},
			ip:          "10.0.0.21",
+			kubeletPort: 10250,
+			webhookPort: 9443,
			serviceName: "service-name",
			token:       "dnjklsdjnksd892389238",
		},

@@ -73,6 +84,9 @@ func Test_sharedAgentData(t *testing.T) {
				"serviceName": "service-name",
				"token":       "dnjklsdjnksd892389238",
				"version":     "v1.2.3",
+				"mirrorHostNodes": "false",
+				"kubeletPort":     "10250",
+				"webhookPort":     "9443",
			},
		},
		{

@@ -87,6 +101,8 @@ func Test_sharedAgentData(t *testing.T) {
					HostVersion: "v1.3.3",
				},
			},
+			kubeletPort: 10250,
+			webhookPort: 9443,
			ip:          "10.0.0.21",
			serviceName: "service-name",
			token:       "dnjklsdjnksd892389238",

@@ -98,13 +114,16 @@ func Test_sharedAgentData(t *testing.T) {
				"serviceName": "service-name",
				"token":       "dnjklsdjnksd892389238",
				"version":     "v1.3.3",
+				"mirrorHostNodes": "false",
+				"kubeletPort":     "10250",
+				"webhookPort":     "9443",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-			config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip)
+			config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip, tt.args.kubeletPort, tt.args.webhookPort)

			data := make(map[string]string)
			err := yaml.Unmarshal([]byte(config), data)
@@ -5,12 +5,14 @@ import (
	"errors"
	"fmt"

-	"github.com/rancher/k3k/pkg/controller"
-	"k8s.io/utils/ptr"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/ptr"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

+	"github.com/rancher/k3k/pkg/controller"
)

const (
@@ -5,37 +5,41 @@
	"errors"
	"fmt"
	"net"
	"reflect"
	"slices"
	"strings"
	"time"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	k3kcontroller "github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/cluster/agent"
	"github.com/rancher/k3k/pkg/controller/cluster/server"
	"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
	"github.com/rancher/k3k/pkg/controller/kubeconfig"
	"github.com/rancher/k3k/pkg/controller/policy"
	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/workqueue"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/cluster/agent"
	"github.com/rancher/k3k/pkg/controller/cluster/server"
	"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
	"github.com/rancher/k3k/pkg/controller/kubeconfig"
	"github.com/rancher/k3k/pkg/controller/policy"
)

const (

@@ -45,28 +49,32 @@ const (
	etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
	ClusterInvalidName   = "system"

-	maxConcurrentReconciles = 1
-	defaultVirtualClusterCIDR = "10.52.0.0/16"
-	defaultVirtualServiceCIDR = "10.53.0.0/16"
-	defaultSharedClusterCIDR  = "10.42.0.0/16"
-	defaultSharedServiceCIDR  = "10.43.0.0/16"
-	memberRemovalTimeout      = time.Minute * 1
)

+	defaultVirtualClusterCIDR    = "10.52.0.0/16"
+	defaultVirtualServiceCIDR    = "10.53.0.0/16"
+	defaultSharedClusterCIDR     = "10.42.0.0/16"
+	defaultSharedServiceCIDR     = "10.43.0.0/16"
+	defaultStoragePersistentSize = "1G"
+	memberRemovalTimeout         = time.Minute * 1
+)

+var (
+	ErrClusterValidation         = errors.New("cluster validation error")
+	ErrCustomCACertSecretMissing = errors.New("custom CA certificate secret is missing")
+)
type ClusterReconciler struct {
-	DiscoveryClient *discovery.DiscoveryClient
-	Client          ctrlruntimeclient.Client
-	Scheme          *runtime.Scheme
+	DiscoveryClient *discovery.DiscoveryClient
+	Client          client.Client
+	Scheme          *runtime.Scheme
	record.EventRecorder
	SharedAgentImage           string
	SharedAgentImagePullPolicy string
	K3SImage                   string
	K3SImagePullPolicy         string
+	PortAllocator              *agent.PortAllocator
}

// Add adds a new controller to the manager
-func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy string, k3SImage string, k3SImagePullPolicy string) error {
+func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage string, k3SImagePullPolicy string, maxConcurrentReconciles int, portAllocator *agent.PortAllocator, eventRecorder record.EventRecorder) error {
	discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
	if err != nil {
		return err

@@ -76,15 +84,21 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgent
		return errors.New("missing shared agent image")
	}

+	if eventRecorder == nil {
+		eventRecorder = mgr.GetEventRecorderFor(clusterController)
+	}
+
	// initialize a new Reconciler
	reconciler := ClusterReconciler{
		DiscoveryClient: discoveryClient,
		Client:          mgr.GetClient(),
		Scheme:          mgr.GetScheme(),
+		EventRecorder:   eventRecorder,
		SharedAgentImage:           sharedAgentImage,
		SharedAgentImagePullPolicy: sharedAgentImagePullPolicy,
		K3SImage:                   k3SImage,
		K3SImagePullPolicy:         k3SImagePullPolicy,
+		PortAllocator:              portAllocator,
	}

	return ctrl.NewControllerManagedBy(mgr).

@@ -92,6 +106,7 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgent
		Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
		Owns(&apps.StatefulSet{}).
		Owns(&v1.Service{}).
+		WithOptions(ctrlcontroller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
		Complete(&reconciler)
}
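The WithOptions call above is what bounds reconcile parallelism; controller-runtime defaults to one worker per controller. A minimal sketch of the same knob in isolation (the value 4 is illustrative):

	// Sketch: reconcile up to 4 Clusters concurrently.
	err := ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.Cluster{}).
		WithOptions(ctrlcontroller.Options{MaxConcurrentReconciles: 4}).
		Complete(&reconciler)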

@@ -138,27 +153,45 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request

	var cluster v1alpha1.Cluster
	if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
-		return reconcile.Result{}, err
+		return reconcile.Result{}, client.IgnoreNotFound(err)
	}

	// if DeletionTimestamp is not Zero -> finalize the object
	if !cluster.DeletionTimestamp.IsZero() {
-		return c.finalizeCluster(ctx, cluster)
+		return c.finalizeCluster(ctx, &cluster)
	}

-	// add finalizers
-	if !controllerutil.AddFinalizer(&cluster, clusterFinalizerName) {
+	// Set initial status if not already set
+	if cluster.Status.Phase == "" || cluster.Status.Phase == v1alpha1.ClusterUnknown {
+		cluster.Status.Phase = v1alpha1.ClusterProvisioning
+		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+			Type:    ConditionReady,
+			Status:  metav1.ConditionFalse,
+			Reason:  ReasonProvisioning,
+			Message: "Cluster is being provisioned",
+		})
+
+		if err := c.Client.Status().Update(ctx, &cluster); err != nil {
+			return reconcile.Result{}, err
+		}
+
+		return reconcile.Result{Requeue: true}, nil
+	}
+
+	// add finalizer
+	if controllerutil.AddFinalizer(&cluster, clusterFinalizerName) {
		if err := c.Client.Update(ctx, &cluster); err != nil {
			return reconcile.Result{}, err
		}
+
+		return reconcile.Result{Requeue: true}, nil
	}

	orig := cluster.DeepCopy()

	reconcilerErr := c.reconcileCluster(ctx, &cluster)

	// update Status if needed
-	if !reflect.DeepEqual(orig.Status, cluster.Status) {
+	if !equality.Semantic.DeepEqual(orig.Status, cluster.Status) {
		if err := c.Client.Status().Update(ctx, &cluster); err != nil {
			return reconcile.Result{}, err
		}

@@ -175,7 +208,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
	}

	// update Cluster if needed
-	if !reflect.DeepEqual(orig.Spec, cluster.Spec) {
+	if !equality.Semantic.DeepEqual(orig.Spec, cluster.Spec) {
		if err := c.Client.Update(ctx, &cluster); err != nil {
			return reconcile.Result{}, err
		}
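The move from reflect.DeepEqual to equality.Semantic.DeepEqual avoids spurious writes: apimachinery's semantic comparison knows that certain types compare by value rather than representation, resource quantities being the classic case. Illustration:

	// Sketch: "1" and "1000m" are the same quantity semantically,
	// but differ byte-for-byte under reflect.DeepEqual.
	a := resource.MustParse("1")
	b := resource.MustParse("1000m")

	fmt.Println(reflect.DeepEqual(a, b))           // false
	fmt.Println(equality.Semantic.DeepEqual(a, b)) // true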

@@ -185,8 +218,34 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}

func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
+	err := c.reconcile(ctx, cluster)
+	c.updateStatus(cluster, err)
+
+	return err
+}
+
+func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Cluster) error {
	log := ctrl.LoggerFrom(ctx)

+	var ns v1.Namespace
+	if err := c.Client.Get(ctx, client.ObjectKey{Name: cluster.Namespace}, &ns); err != nil {
+		return err
+	}
+
+	policyName, found := ns.Labels[policy.PolicyNameLabelKey]
+	cluster.Status.PolicyName = policyName
+
+	if found && policyName != "" {
+		var policy v1alpha1.VirtualClusterPolicy
+		if err := c.Client.Get(ctx, client.ObjectKey{Name: policyName}, &policy); err != nil {
+			return err
+		}
+
+		if err := c.validate(cluster, policy); err != nil {
+			return err
+		}
+	}
+
	// if the Version is not specified we will try to use the same Kubernetes version of the host.
	// This version is stored in the Status object, and it will not be updated if already set.
	if cluster.Spec.Version == "" && cluster.Status.HostVersion == "" {
@@ -202,24 +261,12 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
		cluster.Status.HostVersion = k8sVersion + "-k3s1"
	}

-	// TODO: update status?
-	if err := c.validate(cluster); err != nil {
-		log.Error(err, "invalid change")
-		return nil
-	}
-
	token, err := c.token(ctx, cluster)
	if err != nil {
		return err
	}

-	s := server.New(cluster, c.Client, token, string(cluster.Spec.Mode), c.K3SImage, c.K3SImagePullPolicy)
-
-	cluster.Status.Persistence = cluster.Spec.Persistence
-	if cluster.Spec.Persistence.StorageRequestSize == "" {
-		// default to 1G of request size
-		cluster.Status.Persistence.StorageRequestSize = defaultStoragePersistentSize
-	}
+	s := server.New(cluster, c.Client, token, c.K3SImage, c.K3SImagePullPolicy)

	cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
	if cluster.Status.ClusterCIDR == "" {
@@ -236,7 +283,6 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
		log.Info("looking up Service CIDR for shared mode")

		cluster.Status.ServiceCIDR, err = c.lookupServiceCIDR(ctx)
-
		if err != nil {
			log.Error(err, "error while looking up Cluster Service CIDR")

@@ -252,14 +298,6 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
		}
	}

-	var ns v1.Namespace
-	if err := c.Client.Get(ctx, client.ObjectKey{Name: cluster.Namespace}, &ns); err != nil {
-		return err
-	}
-
-	policyName := ns.Labels[policy.PolicyNameLabelKey]
-	cluster.Status.PolicyName = policyName
-
	if err := c.ensureNetworkPolicy(ctx, cluster); err != nil {
		return err
	}
@@ -291,11 +329,11 @@ func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alp
		return err
	}

-	if err := c.ensureKubeconfigSecret(ctx, cluster, serviceIP, token); err != nil {
+	if err := c.ensureKubeconfigSecret(ctx, cluster, serviceIP, 443); err != nil {
		return err
	}

-	return c.bindNodeProxyClusterRole(ctx, cluster)
+	return c.bindClusterRoles(ctx, cluster)
}

// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
@@ -331,13 +369,13 @@ func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *
}

// ensureKubeconfigSecret will create or update the Secret containing the kubeconfig data from the k3s server
-func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
+func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string, port int) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("ensuring kubeconfig secret")

	adminKubeconfig := kubeconfig.New()

-	kubeconfig, err := adminKubeconfig.Generate(ctx, c.Client, cluster, serviceIP)
+	kubeconfig, err := adminKubeconfig.Generate(ctx, c.Client, cluster, serviceIP, port)
	if err != nil {
		return err
	}
@@ -409,7 +447,7 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
	log := ctrl.LoggerFrom(ctx)
	log.Info("ensuring network policy")

-	networkPolicyName := k3kcontroller.SafeConcatNameWithPrefix(cluster.Name)
+	networkPolicyName := controller.SafeConcatNameWithPrefix(cluster.Name)

	// network policies are managed by the Policy -> delete the one created as a standalone cluster
	if cluster.Status.PolicyName != "" {

@@ -420,12 +458,12 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
			},
		}

-		return ctrlruntimeclient.IgnoreNotFound(c.Client.Delete(ctx, netpol))
+		return client.IgnoreNotFound(c.Client.Delete(ctx, netpol))
	}

	expectedNetworkPolicy := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
-			Name:      k3kcontroller.SafeConcatNameWithPrefix(cluster.Name),
+			Name:      controller.SafeConcatNameWithPrefix(cluster.Name),
			Namespace: cluster.Namespace,
		},
		TypeMeta: metav1.TypeMeta{

@@ -475,6 +513,7 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
	}

	currentNetworkPolicy := expectedNetworkPolicy.DeepCopy()

	result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentNetworkPolicy, func() error {
		if err := controllerutil.SetControllerReference(cluster, currentNetworkPolicy, c.Scheme); err != nil {
			return err

@@ -484,7 +523,6 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
		return nil
	})
	if err != nil {
		return err
	}
@@ -502,8 +540,8 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v
	log.Info("ensuring cluster service")

	expectedService := server.Service(cluster)

	currentService := expectedService.DeepCopy()

	result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentService, func() error {
		if err := controllerutil.SetControllerReference(cluster, currentService, c.Scheme); err != nil {
			return err

@@ -513,7 +551,6 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v
		return nil
	})
	if err != nil {
		return nil, err
	}

@@ -539,6 +576,7 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
	}

	currentServerIngress := expectedServerIngress.DeepCopy()

	result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentServerIngress, func() error {
		if err := controllerutil.SetControllerReference(cluster, currentServerIngress, c.Scheme); err != nil {
			return err

@@ -549,7 +587,6 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
		return nil
	})
	if err != nil {
		return err
	}
@@ -601,31 +638,34 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
		return err
	}

-func (c *ClusterReconciler) bindNodeProxyClusterRole(ctx context.Context, cluster *v1alpha1.Cluster) error {
-	clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
-	if err := c.Client.Get(ctx, types.NamespacedName{Name: "k3k-node-proxy"}, clusterRoleBinding); err != nil {
-		return fmt.Errorf("failed to get or find k3k-node-proxy ClusterRoleBinding: %w", err)
-	}
+func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
+	clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}

-	subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName)
+	var err error

-	found := false
-	for _, subject := range clusterRoleBinding.Subjects {
-		if subject.Name == subjectName && subject.Namespace == cluster.Namespace {
-			found = true
+	for _, clusterRole := range clusterRoles {
+		var clusterRoleBinding rbacv1.ClusterRoleBinding
+		if getErr := c.Client.Get(ctx, types.NamespacedName{Name: clusterRole}, &clusterRoleBinding); getErr != nil {
+			err = errors.Join(err, fmt.Errorf("failed to get or find %s ClusterRoleBinding: %w", clusterRole, getErr))
+			continue
+		}

+		clusterSubject := rbacv1.Subject{
+			Kind:      rbacv1.ServiceAccountKind,
+			Name:      controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName),
+			Namespace: cluster.Namespace,
+		}

+		if !slices.Contains(clusterRoleBinding.Subjects, clusterSubject) {
+			clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, clusterSubject)

+			if updateErr := c.Client.Update(ctx, &clusterRoleBinding); updateErr != nil {
+				err = errors.Join(err, fmt.Errorf("failed to update %s ClusterRoleBinding: %w", clusterRole, updateErr))
+			}
+		}
+	}

-	if !found {
-		clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1.Subject{
-			Kind:      "ServiceAccount",
-			Name:      subjectName,
-			Namespace: cluster.Namespace,
-		})
-	}

-	return c.Client.Update(ctx, clusterRoleBinding)
+	return err
}
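Two stdlib details carry the rewritten bindClusterRoles: slices.Contains makes the subject append idempotent (rbacv1.Subject is a struct of comparable string fields, so == is field-wise), and errors.Join lets a failure on one binding be reported without skipping the other. A minimal illustration:

	// Sketch: Contains uses ==, which works because Subject is comparable.
	s := rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Name: "a", Namespace: "ns"}
	subjects := []rbacv1.Subject{s}
	fmt.Println(slices.Contains(subjects, s)) // true

	// Join drops nils and keeps every real failure.
	err := errors.Join(nil, errors.New("first"), errors.New("second"))
	fmt.Println(err) // "first" and "second", newline-separated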

func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {

@@ -635,15 +675,47 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.C
	if cluster.Spec.Mode == agent.VirtualNodeMode {
		agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.K3SImage, c.K3SImagePullPolicy)
	} else {
-		agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token)
+		// Assign port from pool if shared agent enabled mirroring of host nodes
+		kubeletPort := 10250
+		webhookPort := 9443
+
+		if cluster.Spec.MirrorHostNodes {
+			var err error
+
+			kubeletPort, err = c.PortAllocator.AllocateKubeletPort(ctx, cluster.Name, cluster.Namespace)
+			if err != nil {
+				return err
+			}
+
+			cluster.Status.KubeletPort = kubeletPort
+
+			webhookPort, err = c.PortAllocator.AllocateWebhookPort(ctx, cluster.Name, cluster.Namespace)
+			if err != nil {
+				return err
+			}
+
+			cluster.Status.WebhookPort = webhookPort
+		}
+
+		agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, webhookPort)
	}

	return agentEnsurer.EnsureResources(ctx)
}
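PortAllocator is defined elsewhere in the tree; from the call sites in this diff (here and in finalizeCluster) its surface can be inferred to be roughly the following. This is an assumption for orientation, not the actual declaration:

	// Inferred sketch only; the real type lives in pkg/controller/cluster/agent.
	type PortAllocator struct{ /* presumably a range-backed allocator persisted via the client */ }

	func (p *PortAllocator) AllocateKubeletPort(ctx context.Context, name, ns string) (int, error) { return 0, nil }
	func (p *PortAllocator) AllocateWebhookPort(ctx context.Context, name, ns string) (int, error) { return 0, nil }
	func (p *PortAllocator) DeallocateKubeletPort(ctx context.Context, name, ns string, port int) error { return nil }
	func (p *PortAllocator) DeallocateWebhookPort(ctx context.Context, name, ns string, port int) error { return nil }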

-func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster) error {
+func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster, policy v1alpha1.VirtualClusterPolicy) error {
	if cluster.Name == ClusterInvalidName {
-		return errors.New("invalid cluster name " + cluster.Name + " no action will be taken")
+		return fmt.Errorf("%w: invalid cluster name %q", ErrClusterValidation, cluster.Name)
	}

+	if cluster.Spec.Mode != policy.Spec.AllowedMode {
+		return fmt.Errorf("%w: mode %q is not allowed by the policy %q", ErrClusterValidation, cluster.Spec.Mode, policy.Name)
+	}
+
+	if cluster.Spec.CustomCAs.Enabled {
+		if err := c.validateCustomCACerts(cluster); err != nil {
+			return fmt.Errorf("%w: %w", ErrClusterValidation, err)
+		}
+	}
+
	return nil
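Since every failure path now wraps ErrClusterValidation with %w (including the double-wrap form, valid since Go 1.20), callers can branch on the failure class instead of matching strings:

	// Sketch: detect validation failures generically.
	if err := c.validate(cluster, policy); err != nil {
		if errors.Is(err, ErrClusterValidation) {
			// user-fixable condition: surface it on status/events
			// rather than requeueing forever
		}
	}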

@@ -686,11 +758,11 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro

	log.Info("looking up serviceCIDR from kube-apiserver pod")

-	matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{
+	matchingLabels := client.MatchingLabels(map[string]string{
		"component": "kube-apiserver",
		"tier":      "control-plane",
	})
-	listOpts := &ctrlruntimeclient.ListOptions{Namespace: "kube-system"}
+	listOpts := &client.ListOptions{Namespace: "kube-system"}
	matchingLabels.ApplyToList(listOpts)

	var podList v1.PodList

@@ -725,3 +797,18 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro

	return "", nil
}
+// validateCustomCACerts will make sure that all the cert secrets exist
+func (c *ClusterReconciler) validateCustomCACerts(cluster *v1alpha1.Cluster) error {
+	credentialSources := cluster.Spec.CustomCAs.Sources
+	if credentialSources.ClientCA.SecretName == "" ||
+		credentialSources.ServerCA.SecretName == "" ||
+		credentialSources.ETCDPeerCA.SecretName == "" ||
+		credentialSources.ETCDServerCA.SecretName == "" ||
+		credentialSources.RequestHeaderCA.SecretName == "" ||
+		credentialSources.ServiceAccountToken.SecretName == "" {
+		return ErrCustomCACertSecretMissing
+	}
+
+	return nil
+}
@@ -2,54 +2,58 @@ package cluster

import (
	"context"
	"errors"
	"fmt"
	"reflect"
	"slices"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/cluster/agent"

	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
-func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster v1alpha1.Cluster) (reconcile.Result, error) {
+func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alpha1.Cluster) (reconcile.Result, error) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("finalizing Cluster")

	// remove finalizer from the server pods and update them.
	matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
	listOpts := &ctrlruntimeclient.ListOptions{Namespace: cluster.Namespace}
	matchingLabels.ApplyToList(listOpts)

+	// Set the Terminating phase and condition
+	cluster.Status.Phase = v1alpha1.ClusterTerminating
+	meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+		Type:    ConditionReady,
+		Status:  metav1.ConditionFalse,
+		Reason:  ReasonTerminating,
+		Message: "Cluster is being terminated",
+	})

	var podList v1.PodList
	if err := c.Client.List(ctx, &podList, listOpts); err != nil {
		return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
	}

	for _, pod := range podList.Items {
		if controllerutil.ContainsFinalizer(&pod, etcdPodFinalizerName) {
			controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName)

			if err := c.Client.Update(ctx, &pod); err != nil {
				return reconcile.Result{}, err
			}
		}
	}

-	if err := c.unbindNodeProxyClusterRole(ctx, &cluster); err != nil {
+	if err := c.unbindClusterRoles(ctx, cluster); err != nil {
		return reconcile.Result{}, err
	}

-	if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
-		// remove finalizer from the cluster and update it.
-		controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
-
-		if err := c.Client.Update(ctx, &cluster); err != nil {
-			return reconcile.Result{}, err
-		}
-	}
+	// Deallocate ports for kubelet and webhook if used
+	if cluster.Spec.Mode == v1alpha1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
+		log.Info("deallocating ports for kubelet and webhook")
+
+		if err := c.PortAllocator.DeallocateKubeletPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.KubeletPort); err != nil {
+			return reconcile.Result{}, err
+		}
+
+		if err := c.PortAllocator.DeallocateWebhookPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.WebhookPort); err != nil {
+			return reconcile.Result{}, err
+		}
+	}
+
+	// Remove finalizer from the cluster and update it only when all resources are cleaned up
+	if controllerutil.RemoveFinalizer(cluster, clusterFinalizerName) {
+		if err := c.Client.Update(ctx, cluster); err != nil {
+			return reconcile.Result{}, err
+		}
+	}

@@ -57,28 +61,37 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster v1alpha
	return reconcile.Result{}, nil
}
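The reshaped block relies on AddFinalizer/RemoveFinalizer returning true only when they actually mutate the object (recent controller-runtime versions), which is what lets the code issue an Update exclusively on change:

	// Sketch: persist only when the finalizer set changed.
	if controllerutil.RemoveFinalizer(obj, "example.io/finalizer") {
		if err := c.Client.Update(ctx, obj); err != nil {
			return err
		}
	}
	// false means the finalizer was already absent; no write needed.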

-func (c *ClusterReconciler) unbindNodeProxyClusterRole(ctx context.Context, cluster *v1alpha1.Cluster) error {
-	clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
-	if err := c.Client.Get(ctx, types.NamespacedName{Name: "k3k-node-proxy"}, clusterRoleBinding); err != nil {
-		return fmt.Errorf("failed to get or find k3k-node-proxy ClusterRoleBinding: %w", err)
-	}
+func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
+	clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}

-	subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName)
+	var err error

-	var cleanedSubjects []rbacv1.Subject
-	for _, subject := range clusterRoleBinding.Subjects {
-		if subject.Name != subjectName || subject.Namespace != cluster.Namespace {
-			cleanedSubjects = append(cleanedSubjects, subject)
+	for _, clusterRole := range clusterRoles {
+		var clusterRoleBinding rbacv1.ClusterRoleBinding
+		if getErr := c.Client.Get(ctx, types.NamespacedName{Name: clusterRole}, &clusterRoleBinding); getErr != nil {
+			err = errors.Join(err, fmt.Errorf("failed to get or find %s ClusterRoleBinding: %w", clusterRole, getErr))
+			continue
+		}

+		clusterSubject := rbacv1.Subject{
+			Kind:      rbacv1.ServiceAccountKind,
+			Name:      controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName),
+			Namespace: cluster.Namespace,
+		}

+		// remove the clusterSubject from the ClusterRoleBinding
+		cleanedSubjects := slices.DeleteFunc(clusterRoleBinding.Subjects, func(subject rbacv1.Subject) bool {
+			return reflect.DeepEqual(subject, clusterSubject)
+		})

+		if !reflect.DeepEqual(clusterRoleBinding.Subjects, cleanedSubjects) {
+			clusterRoleBinding.Subjects = cleanedSubjects

+			if updateErr := c.Client.Update(ctx, &clusterRoleBinding); updateErr != nil {
+				err = errors.Join(err, fmt.Errorf("failed to update %s ClusterRoleBinding: %w", clusterRole, updateErr))
+			}
+		}
+	}

-	// if no subject was removed, all good
-	if reflect.DeepEqual(clusterRoleBinding.Subjects, cleanedSubjects) {
-		return nil
-	}

-	clusterRoleBinding.Subjects = cleanedSubjects

-	return c.Client.Update(ctx, clusterRoleBinding)
+	return err
}
@@ -2,24 +2,25 @@ package cluster_test

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	"github.com/go-logr/zapr"
-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
-	"github.com/rancher/k3k/pkg/controller/cluster"
	"go.uber.org/zap"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
-	ctrl "sigs.k8s.io/controller-runtime"
+	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"

+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	ctrl "sigs.k8s.io/controller-runtime"

+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/controller/cluster"
+	"github.com/rancher/k3k/pkg/controller/cluster/agent"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)
@@ -38,12 +39,15 @@
)

var _ = BeforeSuite(func() {

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
		ErrorIfCRDPathMissing: true,
	}

+	// setting controller namespace env to activate port range allocator
+	_ = os.Setenv("CONTROLLER_NAMESPACE", "default")
+
	cfg, err := testEnv.Start()
	Expect(err).NotTo(HaveOccurred())

@@ -59,8 +63,15 @@ var _ = BeforeSuite(func() {
	mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme})
	Expect(err).NotTo(HaveOccurred())

+	portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
+	Expect(err).NotTo(HaveOccurred())
+
+	err = mgr.Add(portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), "50000-51000", "51001-52000"))
+	Expect(err).NotTo(HaveOccurred())
+
	ctx, cancel = context.WithCancel(context.Background())
-	err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "", "rancher/k3s", "")
+	err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "", "rancher/k3s", "", 50, portAllocator, &record.FakeRecorder{})
	Expect(err).NotTo(HaveOccurred())

	go func() {
@@ -81,13 +92,7 @@ var _ = AfterSuite(func() {
func buildScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()

-	err := corev1.AddToScheme(scheme)
-	Expect(err).NotTo(HaveOccurred())
-	err = rbacv1.AddToScheme(scheme)
-	Expect(err).NotTo(HaveOccurred())
-	err = appsv1.AddToScheme(scheme)
-	Expect(err).NotTo(HaveOccurred())
-	err = networkingv1.AddToScheme(scheme)
+	err := clientgoscheme.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
	err = v1alpha1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
@@ -5,30 +5,31 @@ import (
	"fmt"
	"time"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
-	k3kcontroller "github.com/rancher/k3k/pkg/controller"
-	"github.com/rancher/k3k/pkg/controller/cluster/server"
-	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"

-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/ptr"

+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	k3kcontroller "github.com/rancher/k3k/pkg/controller"
+	"github.com/rancher/k3k/pkg/controller/cluster/server"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), func() {

	Context("creating a Cluster", func() {

		var (
			namespace string
			ctx       context.Context
		)

		BeforeEach(func() {
			ctx = context.Background()

			createdNS := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"}}
			err := k8sClient.Create(context.Background(), createdNS)
			Expect(err).To(Not(HaveOccurred()))
@@ -36,11 +37,8 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
		})

		When("creating a Cluster", func() {

-			var cluster *v1alpha1.Cluster
-
-			BeforeEach(func() {
-				cluster = &v1alpha1.Cluster{
+			It("will be created with some defaults", func() {
+				cluster := &v1alpha1.Cluster{
					ObjectMeta: metav1.ObjectMeta{
						GenerateName: "cluster-",
						Namespace:    namespace,

@@ -49,17 +47,18 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

				err := k8sClient.Create(ctx, cluster)
				Expect(err).To(Not(HaveOccurred()))
-			})

-			It("will be created with some defaults", func() {
				Expect(cluster.Spec.Mode).To(Equal(v1alpha1.SharedClusterMode))
				Expect(cluster.Spec.Agents).To(Equal(ptr.To[int32](0)))
				Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
				Expect(cluster.Spec.Version).To(BeEmpty())
-				// TOFIX
-				// Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicPersistenceMode))

-				serverVersion, err := k8s.DiscoveryClient.ServerVersion()
+				Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicPersistenceMode))
+				Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("1G"))
+
+				Expect(cluster.Status.Phase).To(Equal(v1alpha1.ClusterUnknown))
+
+				serverVersion, err := k8s.ServerVersion()
				Expect(err).To(Not(HaveOccurred()))
				expectedHostVersion := fmt.Sprintf("%s-k3s1", serverVersion.GitVersion)

@@ -67,7 +66,6 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
					err := k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
					Expect(err).To(Not(HaveOccurred()))
					return cluster.Status.HostVersion
-
				}).
					WithTimeout(time.Second * 30).
					WithPolling(time.Second).
@@ -94,44 +92,23 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

		When("exposing the cluster with nodePort", func() {
			It("will have a NodePort service", func() {
-				cluster.Spec.Expose = &v1alpha1.ExposeConfig{
-					NodePort: &v1alpha1.NodePortConfig{},
-				}
-
-				err := k8sClient.Update(ctx, cluster)
-				Expect(err).To(Not(HaveOccurred()))
-
-				var service v1.Service
-
-				Eventually(func() v1.ServiceType {
-					serviceKey := client.ObjectKey{
-						Name:      server.ServiceName(cluster.Name),
-						Namespace: cluster.Namespace,
-					}
-
-					err := k8sClient.Get(ctx, serviceKey, &service)
-					Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
-					return service.Spec.Type
-				}).
-					WithTimeout(time.Second * 30).
-					WithPolling(time.Second).
-					Should(Equal(v1.ServiceTypeNodePort))
-			})
-
-			It("will have the specified ports exposed when specified", func() {
-				cluster.Spec.Expose = &v1alpha1.ExposeConfig{
-					NodePort: &v1alpha1.NodePortConfig{
-						ServerPort: ptr.To[int32](30010),
-						ETCDPort:   ptr.To[int32](30011),
+				cluster := &v1alpha1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						GenerateName: "cluster-",
+						Namespace:    namespace,
					},
+					Spec: v1alpha1.ClusterSpec{
+						Expose: &v1alpha1.ExposeConfig{
+							NodePort: &v1alpha1.NodePortConfig{},
+						},
+					},
				}

-				err := k8sClient.Update(ctx, cluster)
-				Expect(err).To(Not(HaveOccurred()))
+				Expect(k8sClient.Create(ctx, cluster)).To(Succeed())

-				var service v1.Service
+				var service corev1.Service

-				Eventually(func() v1.ServiceType {
+				Eventually(func() corev1.ServiceType {
					serviceKey := client.ObjectKey{
						Name:      server.ServiceName(cluster.Name),
						Namespace: cluster.Namespace,

@@ -143,7 +120,42 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
				}).
					WithTimeout(time.Second * 30).
					WithPolling(time.Second).
-					Should(Equal(v1.ServiceTypeNodePort))
+					Should(Equal(corev1.ServiceTypeNodePort))
			})

+			It("will have the specified ports exposed when specified", func() {
+				cluster := &v1alpha1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						GenerateName: "cluster-",
+						Namespace:    namespace,
+					},
+					Spec: v1alpha1.ClusterSpec{
+						Expose: &v1alpha1.ExposeConfig{
+							NodePort: &v1alpha1.NodePortConfig{
+								ServerPort: ptr.To[int32](30010),
+								ETCDPort:   ptr.To[int32](30011),
+							},
+						},
+					},
+				}
+
+				Expect(k8sClient.Create(ctx, cluster)).To(Succeed())
+
+				var service corev1.Service
+
+				Eventually(func() corev1.ServiceType {
+					serviceKey := client.ObjectKey{
+						Name:      server.ServiceName(cluster.Name),
+						Namespace: cluster.Namespace,
+					}
+
+					err := k8sClient.Get(ctx, serviceKey, &service)
+					Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
+					return service.Spec.Type
+				}).
+					WithTimeout(time.Second * 30).
+					WithPolling(time.Second).
+					Should(Equal(corev1.ServiceTypeNodePort))
+
+				servicePorts := service.Spec.Ports
+				Expect(servicePorts).NotTo(BeEmpty())

@@ -161,18 +173,25 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
			})

			It("will not expose the port when out of range", func() {
-				cluster.Spec.Expose = &v1alpha1.ExposeConfig{
-					NodePort: &v1alpha1.NodePortConfig{
-						ETCDPort: ptr.To[int32](2222),
+				cluster := &v1alpha1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						GenerateName: "cluster-",
+						Namespace:    namespace,
					},
+					Spec: v1alpha1.ClusterSpec{
+						Expose: &v1alpha1.ExposeConfig{
+							NodePort: &v1alpha1.NodePortConfig{
+								ETCDPort: ptr.To[int32](2222),
+							},
+						},
+					},
				}

-				err := k8sClient.Update(ctx, cluster)
-				Expect(err).To(Not(HaveOccurred()))
+				Expect(k8sClient.Create(ctx, cluster)).To(Succeed())

-				var service v1.Service
+				var service corev1.Service

-				Eventually(func() v1.ServiceType {
+				Eventually(func() corev1.ServiceType {
					serviceKey := client.ObjectKey{
						Name:      server.ServiceName(cluster.Name),
						Namespace: cluster.Namespace,

@@ -184,7 +203,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
				}).
					WithTimeout(time.Second * 30).
					WithPolling(time.Second).
-					Should(Equal(v1.ServiceTypeNodePort))
+					Should(Equal(corev1.ServiceTypeNodePort))

				servicePorts := service.Spec.Ports
				Expect(servicePorts).NotTo(BeEmpty())

@@ -195,33 +214,39 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
				Expect(serverPort.Port).To(BeEquivalentTo(443))
				Expect(serverPort.TargetPort.IntValue()).To(BeEquivalentTo(6443))
			})

		})

When("exposing the cluster with loadbalancer", func() {
|
||||
It("will have a LoadBalancer service with the default ports exposed", func() {
|
||||
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
|
||||
LoadBalancer: &v1alpha1.LoadBalancerConfig{},
|
||||
cluster := &v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Expose: &v1alpha1.ExposeConfig{
|
||||
LoadBalancer: &v1alpha1.LoadBalancerConfig{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Update(ctx, cluster)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(k8sClient.Create(ctx, cluster)).To(Succeed())
|
||||
|
||||
var service v1.Service
|
||||
var service corev1.Service
|
||||
|
||||
Eventually(func() v1.ServiceType {
|
||||
Eventually(func() error {
|
||||
serviceKey := client.ObjectKey{
|
||||
Name: server.ServiceName(cluster.Name),
|
||||
Namespace: cluster.Namespace,
|
||||
}
|
||||
|
||||
err := k8sClient.Get(ctx, serviceKey, &service)
|
||||
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
|
||||
return service.Spec.Type
|
||||
return k8sClient.Get(ctx, serviceKey, &service)
|
||||
}).
|
||||
WithTimeout(time.Second * 30).
|
||||
WithPolling(time.Second).
|
||||
Should(Equal(v1.ServiceTypeLoadBalancer))
|
||||
Should(Succeed())
|
||||
|
||||
Expect(service.Spec.Type).To(Equal(corev1.ServiceTypeLoadBalancer))
|
||||
|
||||
servicePorts := service.Spec.Ports
|
||||
Expect(servicePorts).NotTo(BeEmpty())
|
||||
|
||||
@@ -9,27 +9,29 @@
	"net/url"
	"strings"

	certutil "github.com/rancher/dynamiclistener/cert"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcontroller "github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/certs"
	"github.com/rancher/k3k/pkg/controller/cluster/server"
	"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	clientv3 "go.etcd.io/etcd/client/v3"
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/retry"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	certutil "github.com/rancher/dynamiclistener/cert"
	clientv3 "go.etcd.io/etcd/client/v3"
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcontroller "github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/certs"
	"github.com/rancher/k3k/pkg/controller/cluster/server"
	"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)

const (

@@ -42,7 +44,7 @@ type PodReconciler struct {
}

// Add adds a new controller to the manager
-func AddPodController(ctx context.Context, mgr manager.Manager) error {
+func AddPodController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
	// initialize a new Reconciler
	reconciler := PodReconciler{
		Client: mgr.GetClient(),

@@ -52,9 +54,7 @@ func AddPodController(ctx context.Context, mgr manager.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		Watches(&v1.Pod{}, handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &apps.StatefulSet{}, handler.OnlyControllerOwner())).
		Named(podController).
-		WithOptions(controller.Options{
-			MaxConcurrentReconciles: maxConcurrentReconciles,
-		}).
+		WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
		Complete(&reconciler)
}
@@ -152,17 +152,14 @@ func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cl
	}

	// remove our finalizer from the list and update it.
-	if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
-		controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
-
+	if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
		if err := p.Client.Update(ctx, pod); err != nil {
			return err
		}
	}

-	if !controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
-		controllerutil.AddFinalizer(pod, etcdPodFinalizerName)
+	if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
		return p.Client.Update(ctx, pod)
	}

@@ -242,8 +239,8 @@ func removePeer(ctx context.Context, client *clientv3.Client, name, address stri

	if u.Hostname() == address {
		log.Info("removing member from etcd", "name", member.Name, "id", member.ID, "address", address)
-		_, err := client.MemberRemove(ctx, member.ID)

+		_, err := client.MemberRemove(ctx, member.ID)
		if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
			return nil
		}
@@ -11,11 +11,13 @@
	"syscall"
	"time"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
-	"github.com/rancher/k3k/pkg/controller"
-	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

+	v1 "k8s.io/api/core/v1"
+
+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/controller"
)

var ErrServerNotReady = errors.New("server not ready")

@@ -77,7 +79,10 @@ func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error)

		return nil, err
	}
-	defer resp.Body.Close()
+
+	defer func() {
+		_ = resp.Body.Close()
+	}()

	var runtimeBootstrap ControlRuntimeBootstrap
	if err := json.NewDecoder(resp.Body).Decode(&runtimeBootstrap); err != nil {
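The expanded defer is behavior-neutral; it only makes the discarded Close error explicit, which keeps errcheck-style linters quiet without changing semantics:

	// Both forms close the body; the second surfaces the ignored error.
	defer resp.Body.Close()

	defer func() {
		_ = resp.Body.Close()
	}()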

@@ -93,7 +98,7 @@ func basicAuth(username, password string) string {
}

func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
-	//client-ca
+	// client-ca
	decoded, err := base64.StdEncoding.DecodeString(bootstrap.ClientCA.Content)
	if err != nil {
		return err

@@ -101,7 +106,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {

	bootstrap.ClientCA.Content = string(decoded)

-	//client-ca-key
+	// client-ca-key
	decoded, err = base64.StdEncoding.DecodeString(bootstrap.ClientCAKey.Content)
	if err != nil {
		return err

@@ -109,7 +114,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {

	bootstrap.ClientCAKey.Content = string(decoded)

-	//server-ca
+	// server-ca
	decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCA.Content)
	if err != nil {
		return err

@@ -117,7 +122,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {

	bootstrap.ServerCA.Content = string(decoded)

-	//server-ca-key
+	// server-ca-key
	decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCAKey.Content)
	if err != nil {
		return err

@@ -125,7 +130,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {

	bootstrap.ServerCAKey.Content = string(decoded)

-	//etcd-ca
+	// etcd-ca
	decoded, err = base64.StdEncoding.DecodeString(bootstrap.ETCDServerCA.Content)
	if err != nil {
		return err

@@ -133,7 +138,7 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {

	bootstrap.ETCDServerCA.Content = string(decoded)

-	//etcd-ca-key
+	// etcd-ca-key
	decoded, err = base64.StdEncoding.DecodeString(bootstrap.ETCDServerCAKey.Content)
	if err != nil {
		return err

@@ -174,6 +179,7 @@ func GetFromSecret(ctx context.Context, client client.Client, cluster *v1alpha1.
	}

	var bootstrap ControlRuntimeBootstrap

	err := json.Unmarshal(bootstrapData, &bootstrap)

	return &bootstrap, err
@@ -3,21 +3,28 @@ package server

import (
	"fmt"

+	"k8s.io/apimachinery/pkg/util/sets"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/cluster/agent"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) {
	name := configSecretName(s.cluster.Name, init)
-	s.cluster.Status.TLSSANs = append(s.cluster.Spec.TLSSANs,
+
+	sans := sets.NewString(s.cluster.Spec.TLSSANs...)
+	sans.Insert(
		serviceIP,
		ServiceName(s.cluster.Name),
		fmt.Sprintf("%s.%s", ServiceName(s.cluster.Name), s.cluster.Namespace),
	)

+	s.cluster.Status.TLSSANs = sans.List()
+
	config := serverConfigData(serviceIP, s.cluster, s.token)
	if init {
		config = initConfigData(s.cluster, s.token)
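Routing the SANs through a string set both de-duplicates entries the user already listed in spec.tlsSANs and, since sets.String.List() returns a sorted slice, keeps status.tlsSANs stable across reconciles instead of churning on ordering. Illustration:

	// Sketch: duplicates collapse and List() comes back sorted.
	sans := sets.NewString("b.example.com", "a.example.com")
	sans.Insert("a.example.com", "10.53.0.1")

	fmt.Println(sans.List()) // [10.53.0.1 a.example.com b.example.com]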

@@ -3,11 +3,13 @@ package server

import (
	"context"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
-	"github.com/rancher/k3k/pkg/controller"
-	"k8s.io/utils/ptr"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/ptr"

+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/controller"
)

const (

@@ -3,20 +3,24 @@ package server

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strings"
	"text/template"

-	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
-	"github.com/rancher/k3k/pkg/controller"
-	"github.com/rancher/k3k/pkg/controller/cluster/agent"
-	apps "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"

+	apps "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

+	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
+	"github.com/rancher/k3k/pkg/controller"
+	"github.com/rancher/k3k/pkg/controller/cluster/agent"
)

const (
@@ -36,12 +40,12 @@ type Server struct {
	k3SImagePullPolicy string
}

-func New(cluster *v1alpha1.Cluster, client client.Client, token, mode string, k3SImage string, k3SImagePullPolicy string) *Server {
+func New(cluster *v1alpha1.Cluster, client client.Client, token string, k3SImage string, k3SImagePullPolicy string) *Server {
	return &Server{
		cluster:            cluster,
		client:             client,
		token:              token,
-		mode:               mode,
+		mode:               string(cluster.Spec.Mode),
		k3SImage:           k3SImage,
		k3SImagePullPolicy: k3SImagePullPolicy,
	}

@@ -319,6 +323,17 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
		volumeMounts = append(volumeMounts, volumeMount)
	}

+	if s.cluster.Spec.CustomCAs.Enabled {
+		vols, mounts, err := s.loadCACertBundle(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		volumes = append(volumes, vols...)
+
+		volumeMounts = append(volumeMounts, mounts...)
+	}
+
	selector := metav1.LabelSelector{
		MatchLabels: map[string]string{
			"cluster": s.cluster.Name,

@@ -379,7 +394,7 @@ func (s *Server) setupDynamicPersistence() v1.PersistentVolumeClaim {
		StorageClassName: s.cluster.Spec.Persistence.StorageClassName,
		Resources: v1.VolumeResourceRequirements{
			Requests: v1.ResourceList{
-				"storage": resource.MustParse(s.cluster.Status.Persistence.StorageRequestSize),
+				"storage": resource.MustParse(s.cluster.Spec.Persistence.StorageRequestSize),
			},
		},
	},

@@ -410,3 +425,103 @@ func (s *Server) setupStartCommand() (string, error) {

	return output.String(), nil
}

func (s *Server) loadCACertBundle(ctx context.Context) ([]v1.Volume, []v1.VolumeMount, error) {
|
||||
customCerts := s.cluster.Spec.CustomCAs.Sources
|
||||
caCertMap := map[string]string{
|
||||
"server-ca": customCerts.ServerCA.SecretName,
|
||||
"client-ca": customCerts.ClientCA.SecretName,
|
||||
"request-header-ca": customCerts.RequestHeaderCA.SecretName,
|
||||
"etcd-peer-ca": customCerts.ETCDPeerCA.SecretName,
|
||||
"etcd-server-ca": customCerts.ETCDServerCA.SecretName,
|
||||
"service": customCerts.ServiceAccountToken.SecretName,
|
||||
}
|
||||
|
||||
var (
|
||||
volumes []v1.Volume
|
||||
mounts []v1.VolumeMount
|
||||
sortedCertIDs = sortedKeys(caCertMap)
|
||||
)
|
||||
|
||||
for _, certName := range sortedCertIDs {
|
||||
var certSecret v1.Secret
|
||||
|
||||
secretName := string(caCertMap[certName])
|
||||
key := types.NamespacedName{Name: secretName, Namespace: s.cluster.Namespace}
|
||||
|
||||
if err := s.client.Get(ctx, key, &certSecret); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cert := certSecret.Data["tls.crt"]
|
||||
keyData := certSecret.Data["tls.key"]
|
||||
|
||||
// Service account token secret is an exception (may not contain crt/key).
|
||||
if certName != "service" && (len(cert) == 0 || len(keyData) == 0) {
|
||||
return nil, nil, fmt.Errorf("cert or key is not found in secret %s", certName)
|
||||
}
|
||||
|
||||
volumeName := certName + "-vol"
|
||||
|
||||
vol, certMounts := s.mountCACert(volumeName, certName, secretName, "tls")
|
||||
volumes = append(volumes, *vol)
|
||||
mounts = append(mounts, certMounts...)
|
||||
}
|
||||
|
||||
return volumes, mounts, nil
|
||||
}
|
||||
|
||||
func (s *Server) mountCACert(volumeName, certName, secretName string, subPathMount string) (*v1.Volume, []v1.VolumeMount) {
|
||||
var (
|
||||
volume *v1.Volume
|
||||
mounts []v1.VolumeMount
|
||||
)
|
||||
|
||||
// avoid re-adding secretName in case of combined secret
|
||||
|
||||
volume = &v1.Volume{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{SecretName: secretName},
|
||||
},
|
||||
}
|
||||
|
||||
etcdPrefix := ""
|
||||
|
||||
mountFile := certName
|
||||
|
||||
if strings.HasPrefix(certName, "etcd-") {
|
||||
etcdPrefix = "/etcd"
|
||||
mountFile = strings.TrimPrefix(certName, "etcd-")
|
||||
}
|
||||
|
||||
// add the mount for the cert except for the service account token
|
||||
if certName != "service" {
|
||||
mounts = append(mounts, v1.VolumeMount{
|
||||
Name: volumeName,
|
||||
MountPath: fmt.Sprintf("/var/lib/rancher/k3s/server/tls%s/%s.crt", etcdPrefix, mountFile),
|
||||
SubPath: subPathMount + ".crt",
|
||||
})
|
||||
}
|
||||
|
||||
// add the mount for the key
|
||||
mounts = append(mounts, v1.VolumeMount{
|
||||
Name: volumeName,
|
||||
MountPath: fmt.Sprintf("/var/lib/rancher/k3s/server/tls%s/%s.key", etcdPrefix, mountFile),
|
||||
SubPath: subPathMount + ".key",
|
||||
})
|
||||
|
||||
return volume, mounts
|
||||
}
|
||||
|
||||
func sortedKeys(keyMap map[string]string) []string {
|
||||
keys := make([]string, 0, len(keyMap))
|
||||
|
||||
for k := range keyMap {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
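Two details above are easy to miss: sortedKeys gives the volumes and mounts a deterministic order, so repeated reconciles do not produce spurious StatefulSet diffs, and the etcd- prefix moves into the directory rather than the file name. A standalone sketch of the resulting paths, mirroring mountCACert's naming scheme (illustrative, not part of the change):

package main

import (
	"fmt"
	"strings"
)

// pathsFor mirrors mountCACert: etcd-prefixed certs land under tls/etcd/
// with the prefix stripped from the file name.
func pathsFor(certName string) (string, string) {
	etcdPrefix, mountFile := "", certName
	if strings.HasPrefix(certName, "etcd-") {
		etcdPrefix, mountFile = "/etcd", strings.TrimPrefix(certName, "etcd-")
	}

	base := "/var/lib/rancher/k3s/server/tls" + etcdPrefix + "/" + mountFile

	return base + ".crt", base + ".key"
}

func main() {
	fmt.Println(pathsFor("server-ca"))    // .../tls/server-ca.crt .../tls/server-ca.key
	fmt.Println(pathsFor("etcd-peer-ca")) // .../tls/etcd/peer-ca.crt .../tls/etcd/peer-ca.key
}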
@@ -1,11 +1,13 @@
package server

import (
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	"k8s.io/apimachinery/pkg/util/intstr"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
)

func Service(cluster *v1alpha1.Cluster) *v1.Service {
@@ -49,22 +51,17 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
	if cluster.Spec.Expose != nil {
		expose := cluster.Spec.Expose

		// ingress
		if expose.Ingress != nil {
			service.Spec.Type = v1.ServiceTypeClusterIP
			service.Spec.Ports = append(service.Spec.Ports, k3sServerPort, etcdPort)
		}

		// loadbalancer
		if expose.LoadBalancer != nil {
		switch {
		case expose.LoadBalancer != nil:
			service.Spec.Type = v1.ServiceTypeLoadBalancer
			addLoadBalancerPorts(service, *expose.LoadBalancer, k3sServerPort, etcdPort)
		}

		// nodeport
		if expose.NodePort != nil {
		case expose.NodePort != nil:
			service.Spec.Type = v1.ServiceTypeNodePort
			addNodePortPorts(service, *expose.NodePort, k3sServerPort, etcdPort)
		default:
			// default to clusterIP for ingress or empty expose config
			service.Spec.Type = v1.ServiceTypeClusterIP
			service.Spec.Ports = append(service.Spec.Ports, k3sServerPort, etcdPort)
		}
	}

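Rewriting the three independent if blocks as a switch makes the service type mutually exclusive: LoadBalancer takes precedence over NodePort, and everything else, including the ingress case, falls back to ClusterIP. A self-contained sketch of that precedence, with the k3k expose types stubbed out for brevity:

package main

import "fmt"

// expose mirrors the shape of cluster.Spec.Expose as used above; the
// concrete k3k types are elided here.
type expose struct {
	Ingress      *struct{}
	LoadBalancer *struct{}
	NodePort     *struct{}
}

func serviceTypeFor(e expose) string {
	switch {
	case e.LoadBalancer != nil:
		return "LoadBalancer"
	case e.NodePort != nil:
		return "NodePort"
	default:
		// ingress or an empty expose config falls back to ClusterIP
		return "ClusterIP"
	}
}

func main() {
	fmt.Println(serviceTypeFor(expose{LoadBalancer: &struct{}{}, NodePort: &struct{}{}})) // LoadBalancer
	fmt.Println(serviceTypeFor(expose{Ingress: &struct{}{}}))                            // ClusterIP
}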
97
pkg/controller/cluster/status.go
Normal file
@@ -0,0 +1,97 @@
package cluster

import (
	"errors"

	"k8s.io/apimachinery/pkg/api/meta"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)

const (
	// Condition Types
	ConditionReady = "Ready"

	// Condition Reasons
	ReasonValidationFailed   = "ValidationFailed"
	ReasonProvisioning       = "Provisioning"
	ReasonProvisioned        = "Provisioned"
	ReasonProvisioningFailed = "ProvisioningFailed"
	ReasonTerminating        = "Terminating"
)

func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr error) {
	if !cluster.DeletionTimestamp.IsZero() {
		cluster.Status.Phase = v1alpha1.ClusterTerminating
		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
			Type:    ConditionReady,
			Status:  metav1.ConditionFalse,
			Reason:  ReasonTerminating,
			Message: "Cluster is being terminated",
		})

		return
	}

	// Handle validation errors specifically to set the Pending phase.
	if errors.Is(reconcileErr, ErrClusterValidation) {
		cluster.Status.Phase = v1alpha1.ClusterPending
		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
			Type:    ConditionReady,
			Status:  metav1.ConditionFalse,
			Reason:  ReasonValidationFailed,
			Message: reconcileErr.Error(),
		})

		c.Eventf(cluster, v1.EventTypeWarning, ReasonValidationFailed, reconcileErr.Error())

		return
	}

	if errors.Is(reconcileErr, bootstrap.ErrServerNotReady) {
		cluster.Status.Phase = v1alpha1.ClusterProvisioning
		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
			Type:    ConditionReady,
			Status:  metav1.ConditionFalse,
			Reason:  ReasonProvisioning,
			Message: reconcileErr.Error(),
		})

		return
	}

	// If there's an error, but it's not a validation error, the cluster is in a failed state.
	if reconcileErr != nil {
		cluster.Status.Phase = v1alpha1.ClusterFailed
		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
			Type:    ConditionReady,
			Status:  metav1.ConditionFalse,
			Reason:  ReasonProvisioningFailed,
			Message: reconcileErr.Error(),
		})

		c.Eventf(cluster, v1.EventTypeWarning, ReasonProvisioningFailed, reconcileErr.Error())

		return
	}

	// If we reach here, everything is successful.
	cluster.Status.Phase = v1alpha1.ClusterReady
	newCondition := metav1.Condition{
		Type:    ConditionReady,
		Status:  metav1.ConditionTrue,
		Reason:  ReasonProvisioned,
		Message: "Cluster successfully provisioned",
	}

	// Only emit event on transition to Ready
	if !meta.IsStatusConditionPresentAndEqual(cluster.Status.Conditions, ConditionReady, metav1.ConditionTrue) {
		c.Eventf(cluster, v1.EventTypeNormal, ReasonProvisioned, newCondition.Message)
	}

	meta.SetStatusCondition(&cluster.Status.Conditions, newCondition)
}
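Every outcome in the new status.go funnels into a single metav1.Condition of type Ready, which the standard helpers can then query. A minimal sketch of the helper semantics a consumer of cluster.Status.Conditions gets; the standalone slice here is illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var conditions []metav1.Condition

	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:    "Ready",
		Status:  metav1.ConditionFalse,
		Reason:  "Provisioning",
		Message: "server not ready",
	})

	// SetStatusCondition only bumps LastTransitionTime when Status changes,
	// so repeated Provisioning updates keep the transition timestamp stable.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:    "Ready",
		Status:  metav1.ConditionTrue,
		Reason:  "Provisioned",
		Message: "Cluster successfully provisioned",
	})

	if c := meta.FindStatusCondition(conditions, "Ready"); c != nil {
		fmt.Println(c.Status, c.Reason) // True Provisioned
	}
}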
@@ -6,15 +6,17 @@ import (
	"encoding/hex"
	"fmt"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
)

func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {

@@ -7,8 +7,9 @@ import (
	"strings"
	"time"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

const (

@@ -7,19 +7,21 @@ import (
	"slices"
	"time"

	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apiserver/pkg/authentication/user"
	"sigs.k8s.io/controller-runtime/pkg/client"

	certutil "github.com/rancher/dynamiclistener/cert"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/certs"
	"github.com/rancher/k3k/pkg/controller/cluster/server"
	"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apiserver/pkg/authentication/user"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type KubeConfig struct {
@@ -37,7 +39,7 @@ func New() *KubeConfig {
	}
}

func (k *KubeConfig) Generate(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string) (*clientcmdapi.Config, error) {
func (k *KubeConfig) Generate(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string, port int) (*clientcmdapi.Config, error) {
	bootstrapData, err := bootstrap.GetFromSecret(ctx, client, cluster)
	if err != nil {
		return nil, err
@@ -58,7 +60,7 @@ func (k *KubeConfig) Generate(ctx context.Context, client client.Client, cluster
		return nil, err
	}

	url, err := getURLFromService(ctx, client, cluster, hostServerIP)
	url, err := getURLFromService(ctx, client, cluster, hostServerIP, port)
	if err != nil {
		return nil, err
	}
@@ -91,7 +93,7 @@ func NewConfig(url string, serverCA, clientCert, clientKey []byte) *clientcmdapi
	return config
}

func getURLFromService(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string) (string, error) {
func getURLFromService(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string, serverPort int) (string, error) {
	// get the server service to extract the right IP
	key := types.NamespacedName{
		Name: server.ServiceName(cluster.Name),
@@ -115,6 +117,10 @@ func getURLFromService(ctx context.Context, client client.Client, cluster *v1alp
		port = k3kService.Spec.Ports[0].Port
	}

	if serverPort != 0 {
		port = int32(serverPort)
	}

	if !slices.Contains(cluster.Status.TLSSANs, ip) {
		logrus.Warnf("ip %s not in tlsSANs", ip)

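The new port parameter threads from Generate down to getURLFromService, where a non-zero value overrides whatever port was discovered on the service. A tiny sketch of that override order; the function name is illustrative, not from the k3k code:

package main

import "fmt"

func resolvePort(servicePort int32, serverPort int) int32 {
	// The port found on the service is the default...
	port := servicePort
	// ...and an explicit, non-zero override wins.
	if serverPort != 0 {
		port = int32(serverPort)
	}

	return port
}

func main() {
	fmt.Println(resolvePort(443, 0))    // 443: keep the service port
	fmt.Println(resolvePort(443, 6443)) // 6443: explicit override wins
}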
@@ -3,13 +3,15 @@ package policy
import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
)

// reconcileNamespacePodSecurityLabels will update the labels of the namespace to reconcile the PSA level specified in the VirtualClusterPolicy

@@ -3,14 +3,16 @@ package policy
import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcontroller "github.com/rancher/k3k/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/client"

	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcontroller "github.com/rancher/k3k/pkg/controller"
)

func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {

@@ -5,21 +5,24 @@ import (
	"errors"
	"reflect"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcontroller "github.com/rancher/k3k/pkg/controller"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcontroller "github.com/rancher/k3k/pkg/controller"
)

const (
@@ -35,7 +38,7 @@ type VirtualClusterPolicyReconciler struct {
}

// Add the controller to manage the Virtual Cluster policies
func Add(mgr manager.Manager, clusterCIDR string) error {
func Add(mgr manager.Manager, clusterCIDR string, maxConcurrentReconciles int) error {
	reconciler := VirtualClusterPolicyReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
@@ -50,6 +53,7 @@ func Add(mgr manager.Manager, clusterCIDR string) error {
		Owns(&networkingv1.NetworkPolicy{}).
		Owns(&v1.ResourceQuota{}).
		Owns(&v1.LimitRange{}).
		WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
		Complete(&reconciler)
}

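With the extra parameter, callers now decide how many VirtualClusterPolicy objects may be reconciled in parallel. A hypothetical wiring from a main function; the flag name and default are illustrative and not taken from the k3k CLI:

package main

import (
	"flag"
	"log"

	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/rancher/k3k/pkg/controller/policy"
)

func main() {
	// Illustrative flag; k3k's real entrypoint may expose this differently.
	maxConcurrent := flag.Int("max-concurrent-reconciles", 50, "parallel reconciles per controller")
	flag.Parse()

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		log.Fatal(err)
	}

	// clusterCIDR is left empty here, as in the test suite below.
	if err := policy.Add(mgr, "", *maxConcurrent); err != nil {
		log.Fatal(err)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		log.Fatal(err)
	}
}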
@@ -6,17 +6,18 @@ import (
	"testing"

	"github.com/go-logr/zapr"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/policy"

	"go.uber.org/zap"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/policy"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
@@ -35,7 +36,6 @@ var (
)

var _ = BeforeSuite(func() {

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
@@ -54,7 +54,7 @@ var _ = BeforeSuite(func() {
	ctrl.SetLogger(zapr.NewLogger(zap.NewNop()))

	ctx, cancel = context.WithCancel(context.Background())
	err = policy.Add(mgr, "")
	err = policy.Add(mgr, "", 50)
	Expect(err).NotTo(HaveOccurred())

	go func() {

@@ -2,30 +2,28 @@ package policy_test

import (
	"context"
	"fmt"
	"reflect"
	"time"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/policy"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/ptr"

	k3kcontroller "github.com/rancher/k3k/pkg/controller"
	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/ptr"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	k3kcontroller "github.com/rancher/k3k/pkg/controller"
	"github.com/rancher/k3k/pkg/controller/policy"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("VirtualClusterPolicy"), func() {

	Context("creating a VirtualClusterPolicy", func() {

		It("should have the 'shared' allowedMode", func() {
			policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
			Expect(policy.Spec.AllowedMode).To(Equal(v1alpha1.SharedClusterMode))
@@ -54,7 +52,6 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
		})

		When("bound to a namespace", func() {

			var namespace *v1.Namespace

			BeforeEach(func() {
@@ -532,8 +529,6 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
				WithPolling(time.Second).
				Should(BeNil())

			fmt.Printf("%+v\n", resourceQuota)

			delete(namespace.Labels, policy.PolicyNameLabelKey)
			err := k8sClient.Update(ctx, namespace)
			Expect(err).To(Not(HaveOccurred()))

@@ -6,6 +6,7 @@ import (
	"github.com/virtual-kubelet/virtual-kubelet/log"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	ctrlruntimezap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)

@@ -21,7 +22,7 @@ func (l *Logger) WithError(err error) log.Logger {
	return l
}

func (l *Logger) WithField(string, interface{}) log.Logger {
func (l *Logger) WithField(string, any) log.Logger {
	return l
}

@@ -4,12 +4,20 @@ set -eou pipefail

LDFLAGS="-X \"github.com/rancher/k3k/pkg/buildinfo.Version=${VERSION}\""

build_args=()

# Check if coverage is enabled, e.g., in CI or when manually set
if [[ "${COVERAGE:-false}" == "true" ]]; then
    echo "Coverage build enabled."
    build_args+=("-cover" "-coverpkg=./..." "-covermode=atomic")
fi

echo "Building k3k... [cli os/arch: $(go env GOOS)/$(go env GOARCH)]"
echo "Current TAG: ${VERSION} "

export CGO_ENABLED=0
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k-kubelet ./k3k-kubelet
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3k
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3k-kubelet ./k3k-kubelet

# build the cli for the local OS and ARCH
go build -ldflags="${LDFLAGS}" -o bin/k3kcli ./cli
go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3kcli ./cli

146
tests/cli_test.go
Normal file
@@ -0,0 +1,146 @@
package k3k_test

import (
	"bytes"
	"context"
	"os/exec"
	"time"

	"k8s.io/apimachinery/pkg/util/rand"
	"sigs.k8s.io/controller-runtime/pkg/client"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func K3kcli(args ...string) (string, string, error) {
	stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}

	cmd := exec.CommandContext(context.Background(), "k3kcli", args...)
	cmd.Stdout = stdout
	cmd.Stderr = stderr

	err := cmd.Run()

	return stdout.String(), stderr.String(), err
}

var _ = When("using the k3kcli", Label("cli"), func() {
	It("can get the version", func() {
		stdout, _, err := K3kcli("--version")
		Expect(err).To(Not(HaveOccurred()))
		Expect(stdout).To(ContainSubstring("k3kcli version v"))
	})

	When("trying the cluster commands", func() {
		It("can create, list and delete a cluster", func() {
			var (
				stdout string
				stderr string
				err    error
			)

			clusterName := "cluster-" + rand.String(5)
			clusterNamespace := "k3k-" + clusterName

			DeferCleanup(func() {
				err := k8sClient.Delete(context.Background(), &corev1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: clusterNamespace,
					},
				})
				Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
			})

			_, stderr, err = K3kcli("cluster", "create", clusterName)
			Expect(err).To(Not(HaveOccurred()), string(stderr))
			Expect(stderr).To(ContainSubstring("You can start using the cluster"))

			stdout, stderr, err = K3kcli("cluster", "list")
			Expect(err).To(Not(HaveOccurred()), string(stderr))
			Expect(stderr).To(BeEmpty())
			Expect(stdout).To(ContainSubstring(clusterNamespace))

			_, stderr, err = K3kcli("cluster", "delete", clusterName)
			Expect(err).To(Not(HaveOccurred()), string(stderr))
			Expect(stderr).To(ContainSubstring("Deleting [%s] cluster in namespace [%s]", clusterName, clusterNamespace))

			// The deletion could take a bit
			Eventually(func() string {
				stdout, stderr, err := K3kcli("cluster", "list", "-n", clusterNamespace)
				Expect(err).To(Not(HaveOccurred()), string(stderr))
				return stdout + stderr
			}).
				WithTimeout(time.Second * 5).
				WithPolling(time.Second).
				Should(BeEmpty())
		})
	})

	When("trying the policy commands", func() {
		It("can create, list and delete a policy", func() {
			var (
				stdout string
				stderr string
				err    error
			)

			policyName := "policy-" + rand.String(5)

			_, stderr, err = K3kcli("policy", "create", policyName)
			Expect(err).To(Not(HaveOccurred()), string(stderr))
			Expect(stderr).To(ContainSubstring("Creating policy [%s]", policyName))

			stdout, stderr, err = K3kcli("policy", "list")
			Expect(err).To(Not(HaveOccurred()), string(stderr))
			Expect(stderr).To(BeEmpty())
			Expect(stdout).To(ContainSubstring(policyName))

			stdout, stderr, err = K3kcli("policy", "delete", policyName)
			Expect(err).To(Not(HaveOccurred()), string(stderr))
			Expect(stdout).To(BeEmpty())
			Expect(stderr).To(BeEmpty())

			stdout, stderr, err = K3kcli("policy", "list")
			Expect(err).To(Not(HaveOccurred()), string(stderr))
			Expect(stdout).To(BeEmpty())
			Expect(stderr).To(BeEmpty())
		})
	})

	When("trying the kubeconfig command", func() {
		It("can generate a kubeconfig", func() {
			var (
				stderr string
				err    error
			)

			clusterName := "cluster-" + rand.String(5)
			clusterNamespace := "k3k-" + clusterName

			DeferCleanup(func() {
				err := k8sClient.Delete(context.Background(), &corev1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: clusterNamespace,
					},
				})
				Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
			})

			_, stderr, err = K3kcli("cluster", "create", clusterName)
			Expect(err).To(Not(HaveOccurred()), string(stderr))
			Expect(stderr).To(ContainSubstring("You can start using the cluster"))

			_, stderr, err = K3kcli("kubeconfig", "generate", "--name", clusterName)
			Expect(err).To(Not(HaveOccurred()), string(stderr))
			Expect(stderr).To(ContainSubstring("You can start using the cluster"))

			_, stderr, err = K3kcli("cluster", "delete", clusterName)
			Expect(err).To(Not(HaveOccurred()), string(stderr))
			Expect(stderr).To(ContainSubstring("Deleting [%s] cluster in namespace [%s]", clusterName, clusterNamespace))
		})
	})
})

@@ -6,7 +6,6 @@ import (
)

var _ = When("two virtual clusters are installed", Label("e2e"), func() {

	var (
		cluster1 *VirtualCluster
		cluster2 *VirtualCluster
@@ -23,7 +22,6 @@ var _ = When("two virtual clusters are installed", Label("e2e"), func() {
	})

	It("can create pods in each of them that are isolated", func() {

		pod1Cluster1, pod1Cluster1IP := cluster1.NewNginxPod("")
		pod2Cluster1, pod2Cluster1IP := cluster1.NewNginxPod("")
		pod1Cluster2, pod1Cluster2IP := cluster2.NewNginxPod("")

135
tests/cluster_status_test.go
Normal file
@@ -0,0 +1,135 @@
package k3k_test

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/api/meta"
	"sigs.k8s.io/controller-runtime/pkg/client"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/cluster"
	"github.com/rancher/k3k/pkg/controller/policy"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = When("a cluster's status is tracked", Label("e2e"), func() {
	var (
		namespace *corev1.Namespace
		vcp       *v1alpha1.VirtualClusterPolicy
	)

	// This BeforeEach/AfterEach will create a new namespace and a default policy for each test.
	BeforeEach(func() {
		ctx := context.Background()
		namespace = NewNamespace()

		vcp = &v1alpha1.VirtualClusterPolicy{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "policy-",
			},
		}
		Expect(k8sClient.Create(ctx, vcp)).To(Succeed())

		namespace.Labels = map[string]string{
			policy.PolicyNameLabelKey: vcp.Name,
		}
		Expect(k8sClient.Update(ctx, namespace)).To(Succeed())
	})

	AfterEach(func() {
		err := k8sClient.Delete(context.Background(), vcp)
		Expect(err).To(Not(HaveOccurred()))

		DeleteNamespaces(namespace.Name)
	})

	Context("and the cluster is created with a valid configuration", func() {
		It("should start with Provisioning status and transition to Ready", func() {
			ctx := context.Background()

			clusterObj := &v1alpha1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "status-cluster-",
					Namespace:    namespace.Name,
				},
			}
			Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed())

			clusterKey := client.ObjectKeyFromObject(clusterObj)

			// Check for the initial status to be set
			Eventually(func(g Gomega) {
				err := k8sClient.Get(ctx, clusterKey, clusterObj)
				g.Expect(err).NotTo(HaveOccurred())

				g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterProvisioning))

				cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
				g.Expect(cond).NotTo(BeNil())
				g.Expect(cond.Status).To(Equal(metav1.ConditionFalse))
				g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioning))
			}).
				WithPolling(time.Second * 2).
				WithTimeout(time.Second * 20).
				Should(Succeed())

			// Check for the status to be updated to Ready
			Eventually(func(g Gomega) {
				err := k8sClient.Get(ctx, clusterKey, clusterObj)
				g.Expect(err).NotTo(HaveOccurred())

				g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterReady))

				cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
				g.Expect(cond).NotTo(BeNil())
				g.Expect(cond.Status).To(Equal(metav1.ConditionTrue))
				g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioned))
			}).
				WithTimeout(time.Minute * 3).
				WithPolling(time.Second * 5).
				Should(Succeed())
		})
	})

	Context("and the cluster has validation errors", func() {
		It("should be in Pending status with ValidationFailed reason", func() {
			ctx := context.Background()

			clusterObj := &v1alpha1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					GenerateName: "cluster-",
					Namespace:    namespace.Name,
				},
				Spec: v1alpha1.ClusterSpec{
					Mode: v1alpha1.VirtualClusterMode,
				},
			}
			Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed())

			clusterKey := client.ObjectKeyFromObject(clusterObj)

			// Check for the status to be updated
			Eventually(func(g Gomega) {
				err := k8sClient.Get(ctx, clusterKey, clusterObj)
				g.Expect(err).NotTo(HaveOccurred())

				g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterPending))

				cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
				g.Expect(cond).NotTo(BeNil())
				g.Expect(cond.Status).To(Equal(metav1.ConditionFalse))
				g.Expect(cond.Reason).To(Equal(cluster.ReasonValidationFailed))
				g.Expect(cond.Message).To(ContainSubstring(`mode "virtual" is not allowed by the policy`))
			}).
				WithPolling(time.Second * 2).
				WithTimeout(time.Second * 20).
				Should(Succeed())
		})
	})
})

@@ -4,19 +4,21 @@ import (
	"context"
	"crypto/x509"
	"errors"
	"fmt"
	"os"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var _ = When("k3k is installed", Label("e2e"), func() {
	It("is in Running status", func() {

		// check that the controller is running
		Eventually(func() bool {
			opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
@@ -42,7 +44,6 @@ var _ = When("k3k is installed", Label("e2e"), func() {
})

var _ = When("a ephemeral cluster is installed", Label("e2e"), func() {

	var virtualCluster *VirtualCluster

	BeforeEach(func() {
@@ -60,7 +61,7 @@ var _ = When("a ephemeral cluster is installed", Label("e2e"), func() {
	It("regenerates the bootstrap secret after a restart", func() {
		ctx := context.Background()

		_, err := virtualCluster.Client.DiscoveryClient.ServerVersion()
		_, err := virtualCluster.Client.ServerVersion()
		Expect(err).To(Not(HaveOccurred()))

		labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
@@ -70,7 +71,8 @@ var _ = When("a ephemeral cluster is installed", Label("e2e"), func() {
		Expect(len(serverPods.Items)).To(Equal(1))
		serverPod := serverPods.Items[0]

		fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
		GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)

		err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
		Expect(err).To(Not(HaveOccurred()))

@@ -114,7 +116,6 @@ var _ = When("a ephemeral cluster is installed", Label("e2e"), func() {
})

var _ = When("a dynamic cluster is installed", func() {

	var virtualCluster *VirtualCluster

	BeforeEach(func() {
@@ -142,7 +143,7 @@ var _ = When("a dynamic cluster is installed", func() {
	It("use the same bootstrap secret after a restart", func() {
		ctx := context.Background()

		_, err := virtualCluster.Client.DiscoveryClient.ServerVersion()
		_, err := virtualCluster.Client.ServerVersion()
		Expect(err).To(Not(HaveOccurred()))

		labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
@@ -152,7 +153,8 @@ var _ = When("a dynamic cluster is installed", func() {
		Expect(len(serverPods.Items)).To(Equal(1))
		serverPod := serverPods.Items[0]

		fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
		GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)

		err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
		Expect(err).To(Not(HaveOccurred()))

@@ -182,3 +184,94 @@ var _ = When("a dynamic cluster is installed", func() {
		Should(BeNil())
	})
})

var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), func() {
	ctx := context.Background()
	var virtualCluster *VirtualCluster
	BeforeEach(func() {
		namespace := NewNamespace()
		// create custom cert secret
		customCertDir := "testdata/customcerts/"
		certList := []string{
			"server-ca",
			"client-ca",
			"request-header-ca",
			"service",
			"etcd-peer-ca",
			"etcd-server-ca",
		}
		for _, certName := range certList {
			var cert, key []byte
			var err error
			filePathPrefix := ""
			certfile := certName
			if strings.HasPrefix(certName, "etcd") {
				filePathPrefix = "etcd/"
				certfile = strings.TrimPrefix(certName, "etcd-")
			}
			if !strings.Contains(certName, "service") {
				cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt")
				Expect(err).To(Not(HaveOccurred()))
			}
			key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key")
			Expect(err).To(Not(HaveOccurred()))

			certSecret := caCertSecret(certName, namespace.Name, cert, key)
			err = k8sClient.Create(ctx, certSecret)
			Expect(err).To(Not(HaveOccurred()))
		}

		cluster := NewCluster(namespace.Name)
		cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
			Enabled: true,
			Sources: v1alpha1.CredentialSources{
				ServerCA: v1alpha1.CredentialSource{
					SecretName: "server-ca",
				},
				ClientCA: v1alpha1.CredentialSource{
					SecretName: "client-ca",
				},
				ETCDServerCA: v1alpha1.CredentialSource{
					SecretName: "etcd-server-ca",
				},
				ETCDPeerCA: v1alpha1.CredentialSource{
					SecretName: "etcd-peer-ca",
				},
				RequestHeaderCA: v1alpha1.CredentialSource{
					SecretName: "request-header-ca",
				},
				ServiceAccountToken: v1alpha1.CredentialSource{
					SecretName: "service",
				},
			},
		}
		CreateCluster(cluster)
		client, restConfig := NewVirtualK8sClientAndConfig(cluster)

		virtualCluster = &VirtualCluster{
			Cluster:    cluster,
			RestConfig: restConfig,
			Client:     client,
		}
	})
	It("will load the custom certs in the server pod", func() {
		_, _ = virtualCluster.NewNginxPod("")

		labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
		serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
		Expect(err).To(Not(HaveOccurred()))

		Expect(len(serverPods.Items)).To(Equal(1))
		serverPod := serverPods.Items[0]

		// check server-ca.crt
		serverCACrtPath := "/var/lib/rancher/k3s/server/tls/server-ca.crt"
		serverCACrt, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, serverCACrtPath)
		Expect(err).To(Not(HaveOccurred()))

		serverCACrtTestFile, err := os.ReadFile("testdata/customcerts/server-ca.crt")
		Expect(err).To(Not(HaveOccurred()))

		Expect(serverCACrt).To(Equal(serverCACrtTestFile))
	})
})

@@ -8,19 +8,21 @@ import (
	"sync"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"
	"k8s.io/kubectl/pkg/scheme"
	"k8s.io/utils/ptr"

	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

	"github.com/rancher/k3k/k3k-kubelet/translate"
	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	"github.com/rancher/k3k/pkg/controller/certs"
	"github.com/rancher/k3k/pkg/controller/kubeconfig"
	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/tools/remotecommand"
	"k8s.io/kubectl/pkg/scheme"
	"k8s.io/utils/ptr"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
@@ -191,7 +193,7 @@ func NewVirtualK8sClientAndConfig(cluster *v1alpha1.Cluster) (*kubernetes.Client
		vKubeconfig := kubeconfig.New()
		kubeletAltName := fmt.Sprintf("k3k-%s-kubelet", cluster.Name)
		vKubeconfig.AltNames = certs.AddSANs([]string{hostIP, kubeletAltName})
		config, err = vKubeconfig.Generate(ctx, k8sClient, cluster, hostIP)
		config, err = vKubeconfig.Generate(ctx, k8sClient, cluster, hostIP, 0)
		return err
	}).
		WithTimeout(time.Minute * 2).
@@ -249,7 +251,7 @@ func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
		if resourceName == nginxPod.Name && resourceNamespace == nginxPod.Namespace {
			podIP = pod.Status.PodIP

			fmt.Fprintf(GinkgoWriter,
			GinkgoWriter.Printf(
				"pod=%s resource=%s/%s status=%s podIP=%s\n",
				pod.Name, resourceNamespace, resourceName, pod.Status.Phase, podIP,
			)

@@ -3,10 +3,11 @@ package k3k_test
import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/discovery"
	memory "k8s.io/client-go/discovery/cached"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/tools/clientcmd"

	memory "k8s.io/client-go/discovery/cached"
)

type RESTClientGetter struct {

Some files were not shown because too many files have changed in this diff.