Mirror of https://github.com/rancher/k3k.git (synced 2026-03-02 01:30:27 +00:00)

Compare commits: v1.0.0-rc1...v1.0.0-rc3 (13 commits)

af9e1d6ca7, ae380fa8e9, c34cf9ce94, bf70e0d171, cebf6594c4, 075d72df5d,
ee7eac89ce, 514fdf6b86, 730e4e1c79, a3076af38f, 89dc352bea, 7644406eeb,
2206632dcc
.github/workflows/test-conformance-virtual.yaml (new file, vendored, 125 lines)
@@ -0,0 +1,125 @@
name: Conformance Tests - Virtual Mode

on:
  schedule:
    - cron: "0 1 * * *"
  workflow_dispatch:

permissions:
  contents: read

jobs:
  conformance:
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        type:
          - parallel
          - serial

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Install helm
        uses: azure/setup-helm@v4.3.0

      - name: Install hydrophone
        run: go install sigs.k8s.io/hydrophone@latest

      - name: Install k3s
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
          K3S_HOST_VERSION: v1.32.1+k3s1
        run: |
          curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -

          kubectl cluster-info
          kubectl get nodes

      - name: Build, package and setup K3k
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
        run: |
          export REPO=ttl.sh/$(uuidgen)
          export VERSION=1h

          make build
          make package
          make push
          make install

          # add k3kcli to $PATH
          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH

          echo "Wait for K3k controller to be available"
          kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m

      - name: Check k3kcli
        run: k3kcli -v

      - name: Create virtual cluster
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
        run: |
          k3kcli cluster create --mode=virtual --servers=2 mycluster

          export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml

          kubectl cluster-info
          kubectl get nodes
          kubectl get pods -A

      - name: Run conformance tests (parallel)
        if: matrix.type == 'parallel'
        run: |
          # Run conformance tests in parallel mode (skipping serial)
          hydrophone --conformance --parallel 4 --skip='\[Serial\]' \
            --kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
            --output-dir /tmp

      - name: Run conformance tests (serial)
        if: matrix.type == 'serial'
        run: |
          # Run serial conformance tests
          hydrophone --focus='\[Serial\].*\[Conformance\]' \
            --kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \
            --output-dir /tmp

      - name: Export logs
        if: always()
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
        run: |
          journalctl -u k3s -o cat --no-pager > /tmp/k3s.log
          kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log

      - name: Archive K3s logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: k3s-${{ matrix.type }}-logs
          path: /tmp/k3s.log

      - name: Archive K3k logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: k3k-${{ matrix.type }}-logs
          path: /tmp/k3k.log

      - name: Archive conformance logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: conformance-${{ matrix.type }}-logs
          path: /tmp/e2e.log
.github/workflows/test-e2e.yaml (new file, vendored, 184 lines)
@@ -0,0 +1,184 @@
name: Tests E2E

on:
  push:
  pull_request:
  workflow_dispatch:

permissions:
  contents: read

jobs:
  validate:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Validate
        run: make validate

  tests-e2e:
    runs-on: ubuntu-latest
    needs: validate

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Install Ginkgo
        run: go install github.com/onsi/ginkgo/v2/ginkgo

      - name: Setup environment
        run: |
          mkdir ${{ github.workspace }}/covdata

          echo "COVERAGE=true" >> $GITHUB_ENV
          echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
          echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
          echo "VERSION=1h" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"

      - name: Install k3s
        run: |
          curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -

      - name: Build and package and push dev images
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
          REPO: ${{ env.REPO }}
          VERSION: ${{ env.VERSION }}
        run: |
          make build
          make package
          make push
          make install

      - name: Run e2e tests
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
          REPO: ${{ env.REPO }}
          VERSION: ${{ env.VERSION }}
        run: make E2E_LABEL_FILTER="e2e && !slow" test-e2e

      - name: Convert coverage data
        run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out

      - name: Upload coverage reports to Codecov (controller)
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ${GOCOVERDIR}/cover.out
          flags: controller

      - name: Upload coverage reports to Codecov (e2e)
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./cover.out
          flags: e2e

      - name: Archive k3s logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: e2e-k3s-logs
          path: /tmp/k3s.log

      - name: Archive k3k logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: e2e-k3k-logs
          path: /tmp/k3k.log

  tests-e2e-slow:
    runs-on: ubuntu-latest
    needs: validate

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod

      - name: Install Ginkgo
        run: go install github.com/onsi/ginkgo/v2/ginkgo

      - name: Setup environment
        run: |
          mkdir ${{ github.workspace }}/covdata

          echo "COVERAGE=true" >> $GITHUB_ENV
          echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
          echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
          echo "VERSION=1h" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"

      - name: Install k3s
        run: |
          curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -

      - name: Build and package and push dev images
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
          REPO: ${{ env.REPO }}
          VERSION: ${{ env.VERSION }}
        run: |
          make build
          make package
          make push
          make install

      - name: Run e2e tests
        env:
          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
          REPO: ${{ env.REPO }}
          VERSION: ${{ env.VERSION }}
        run: make E2E_LABEL_FILTER="e2e && slow" test-e2e

      - name: Convert coverage data
        run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out

      - name: Upload coverage reports to Codecov (controller)
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ${GOCOVERDIR}/cover.out
          flags: controller

      - name: Upload coverage reports to Codecov (e2e)
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./cover.out
          flags: e2e

      - name: Archive k3s logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: e2e-k3s-logs
          path: /tmp/k3s.log

      - name: Archive k3k logs
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: e2e-k3k-logs
          path: /tmp/k3k.log
.github/workflows/test.yaml (vendored, 87 lines changed)
@@ -62,93 +62,6 @@ jobs:
           files: ./cover.out
           flags: unit
 
-  tests-e2e:
-    runs-on: ubuntu-latest
-    needs: validate
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          fetch-tags: true
-
-      - uses: actions/setup-go@v5
-        with:
-          go-version-file: go.mod
-
-      - name: Install Ginkgo
-        run: go install github.com/onsi/ginkgo/v2/ginkgo
-
-      - name: Setup environment
-        run: |
-          mkdir ${{ github.workspace }}/covdata
-
-          echo "COVERAGE=true" >> $GITHUB_ENV
-          echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
-          echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
-          echo "VERSION=1h" >> $GITHUB_ENV
-          echo "K3S_HOST_VERSION=v1.32.1+k3s1 >> $GITHUB_ENV"
-
-      - name: Install k3s
-        run: |
-          curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
-
-      - name: Build and package and push dev images
-        env:
-          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
-          REPO: ${{ env.REPO }}
-          VERSION: ${{ env.VERSION }}
-        run: |
-          make build
-          make package
-          make push
-          make install
-
-          # add k3kcli to $PATH
-          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
-
-      - name: Check k3kcli
-        run: k3kcli -v
-
-      - name: Run e2e tests
-        env:
-          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
-          REPO: ${{ env.REPO }}
-          VERSION: ${{ env.VERSION }}
-        run: make test-e2e
-
-      - name: Convert coverage data
-        run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
-
-      - name: Upload coverage reports to Codecov (controller)
-        uses: codecov/codecov-action@v5
-        with:
-          token: ${{ secrets.CODECOV_TOKEN }}
-          files: ${GOCOVERDIR}/cover.out
-          flags: controller
-
-      - name: Upload coverage reports to Codecov (e2e)
-        uses: codecov/codecov-action@v5
-        with:
-          token: ${{ secrets.CODECOV_TOKEN }}
-          files: ./cover.out
-          flags: e2e
-
-      - name: Archive k3s logs
-        uses: actions/upload-artifact@v4
-        if: always()
-        with:
-          name: e2e-k3s-logs
-          path: /tmp/k3s.log
-
-      - name: Archive k3k logs
-        uses: actions/upload-artifact@v4
-        if: always()
-        with:
-          name: e2e-k3k-logs
-          path: /tmp/k3k.log
-
   tests-cli:
     runs-on: ubuntu-latest
     needs: validate
Makefile (7 lines changed)
@@ -18,6 +18,9 @@ CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)
 
 ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)
 ENVTEST_DIR ?= $(shell pwd)/.envtest
 
+E2E_LABEL_FILTER ?= "e2e"
+
 export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path)
 
@@ -69,7 +72,7 @@ test-kubelet-controller: ## Run the controller tests (pkg/controller)
 
 .PHONY: test-e2e
 test-e2e: ## Run the e2e tests
-	$(GINKGO) $(GINKGO_FLAGS) --label-filter=e2e tests
+	$(GINKGO) $(GINKGO_FLAGS) --label-filter="$(E2E_LABEL_FILTER)" tests
 
 .PHONY: test-cli
 test-cli: ## Run the cli tests
 
@@ -105,6 +108,8 @@ validate: generate docs fmt ## Validate the project checking for any dependency
 .PHONY: install
 install: ## Install K3k with Helm on the targeted Kubernetes cluster
 	helm upgrade --install --namespace k3k-system --create-namespace \
+		--set controller.extraEnv[0].name=DEBUG \
+		--set-string controller.extraEnv[0].value=true \
 		--set controller.image.repository=$(REPO)/k3k \
 		--set controller.image.tag=$(VERSION) \
 		--set agent.shared.image.repository=$(REPO)/k3k-kubelet \
README.md
@@ -3,7 +3,8 @@
 [](https://shields.io/)
 [](https://goreportcard.com/report/github.com/rancher/k3k)
+[![Conformance Tests - Virtual Mode](https://github.com/rancher/k3k/actions/workflows/test-conformance-virtual.yaml/badge.svg)](https://github.com/rancher/k3k/actions/workflows/test-conformance-virtual.yaml)
 
 K3k, Kubernetes in Kubernetes, is a tool that empowers you to create and manage isolated K3s clusters within your existing Kubernetes environment. It enables efficient multi-tenancy, streamlined experimentation, and robust resource isolation, minimizing infrastructure costs by allowing you to run multiple lightweight Kubernetes clusters on the same physical host. K3k offers both "shared" mode, optimizing resource utilization, and "virtual" mode, providing complete isolation with dedicated K3s server pods. This allows you to access a full Kubernetes experience without the overhead of managing separate physical resources.
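For a concrete picture of the two modes, here is a minimal Cluster manifest, sketched from the example manifests added later in this changeset (the name is a placeholder):

```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: my-cluster
spec:
  mode: virtual   # "virtual" gives dedicated K3s server pods; "shared" optimizes resource use
  servers: 1
```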
Chart.yaml (k3k Helm chart)
@@ -2,5 +2,5 @@ apiVersion: v2
 name: k3k
 description: A Helm chart for K3K
 type: application
-version: 0.3.5
-appVersion: v0.3.5
+version: 1.0.0-rc2
+appVersion: v1.0.0-rc2
Cluster CRD (generated manifest)
@@ -228,6 +228,7 @@ spec:
                     certificates.
                   properties:
                     enabled:
+                      default: true
                       description: Enabled toggles this feature on or off.
                       type: boolean
                     sources:
@@ -244,6 +245,8 @@ spec:
                           - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                           - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                        required:
+                        - secretName
                         type: object
                       etcdPeerCA:
                         description: ETCDPeerCA specifies the etcd-peer-ca cert/key
@@ -256,6 +259,8 @@ spec:
                           - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                           - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                        required:
+                        - secretName
                         type: object
                       etcdServerCA:
                         description: ETCDServerCA specifies the etcd-server-ca cert/key
@@ -268,6 +273,8 @@ spec:
                           - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                           - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                        required:
+                        - secretName
                         type: object
                       requestHeaderCA:
                         description: RequestHeaderCA specifies the request-header-ca
@@ -280,6 +287,8 @@ spec:
                           - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                           - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                        required:
+                        - secretName
                         type: object
                       serverCA:
                         description: ServerCA specifies the server-ca cert/key pair.
@@ -291,6 +300,8 @@ spec:
                           - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                           - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                        required:
+                        - secretName
                         type: object
                       serviceAccountToken:
                         description: ServiceAccountToken specifies the service-account-token
@@ -303,8 +314,20 @@ spec:
                           - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
                           - For ServiceAccountTokenKey: 'tls.key'.
                           type: string
+                        required:
+                        - secretName
                         type: object
+                    required:
+                    - clientCA
+                    - etcdPeerCA
+                    - etcdServerCA
+                    - requestHeaderCA
+                    - serverCA
+                    - serviceAccountToken
                     type: object
+                  required:
+                  - enabled
+                  - sources
                   type: object
                 expose:
                   description: |-
@@ -326,7 +349,7 @@ spec:
                         use for the Ingress.
                         type: string
                     type: object
-                  loadbalancer:
+                  loadBalancer:
                     description: LoadBalancer specifies options for exposing the API
                       server through a LoadBalancer service.
                     properties:
@@ -368,7 +391,7 @@ spec:
                   x-kubernetes-validations:
                   - message: ingress, loadbalancer and nodePort are mutually exclusive;
                       only one can be set
-                    rule: '[has(self.ingress), has(self.loadbalancer), has(self.nodePort)].filter(x,
+                    rule: '[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x,
                       x).size() <= 1'
                 mirrorHostNodes:
                   description: |-
@@ -584,12 +607,13 @@ spec:
                   description: Sync specifies the resources types that will be synced
                     from virtual cluster to host cluster.
                   properties:
-                    configmaps:
+                    configMaps:
                       default:
                         enabled: true
                       description: ConfigMaps resources sync configuration.
                       properties:
                         enabled:
+                          default: true
                           description: Enabled is an on/off switch for syncing resources.
                           type: boolean
                         selector:
@@ -599,6 +623,8 @@ spec:
                             Selector specifies set of labels of the resources that will be synced, if empty
                            then all resources of the given type will be synced.
                           type: object
+                      required:
+                      - enabled
                       type: object
                     ingresses:
                       default:
@@ -606,6 +632,7 @@ spec:
                       description: Ingresses resources sync configuration.
                       properties:
                         enabled:
+                          default: false
                           description: Enabled is an on/off switch for syncing resources.
                           type: boolean
                         selector:
@@ -615,6 +642,8 @@ spec:
                             Selector specifies set of labels of the resources that will be synced, if empty
                            then all resources of the given type will be synced.
                           type: object
+                      required:
+                      - enabled
                       type: object
                     persistentVolumeClaims:
                       default:
@@ -622,6 +651,7 @@ spec:
                       description: PersistentVolumeClaims resources sync configuration.
                       properties:
                         enabled:
+                          default: true
                           description: Enabled is an on/off switch for syncing resources.
                           type: boolean
                         selector:
@@ -631,6 +661,8 @@ spec:
                             Selector specifies set of labels of the resources that will be synced, if empty
                            then all resources of the given type will be synced.
                           type: object
+                      required:
+                      - enabled
                       type: object
                     priorityClasses:
                       default:
@@ -638,6 +670,7 @@ spec:
                       description: PriorityClasses resources sync configuration.
                       properties:
                         enabled:
+                          default: false
                           description: Enabled is an on/off switch for syncing resources.
                           type: boolean
                         selector:
@@ -647,6 +680,8 @@ spec:
                             Selector specifies set of labels of the resources that will be synced, if empty
                            then all resources of the given type will be synced.
                           type: object
+                      required:
+                      - enabled
                       type: object
                     secrets:
                       default:
@@ -654,6 +689,7 @@ spec:
                       description: Secrets resources sync configuration.
                       properties:
                         enabled:
+                          default: true
                           description: Enabled is an on/off switch for syncing resources.
                           type: boolean
                         selector:
@@ -670,6 +706,7 @@ spec:
                       description: Services resources sync configuration.
                       properties:
                         enabled:
+                          default: true
                           description: Enabled is an on/off switch for syncing resources.
                           type: boolean
                         selector:
@@ -679,6 +716,8 @@ spec:
                             Selector specifies set of labels of the resources that will be synced, if empty
                            then all resources of the given type will be synced.
                           type: object
+                      required:
+                      - enabled
                       type: object
                   type: object
                 tlsSANs:
Second generated CRD manifest (same sync field changes at a shallower nesting level)
@@ -230,12 +230,13 @@ spec:
               description: Sync specifies the resources types that will be synced
                 from virtual cluster to host cluster.
               properties:
-                configmaps:
+                configMaps:
                   default:
                     enabled: true
                   description: ConfigMaps resources sync configuration.
                   properties:
                     enabled:
+                      default: true
                       description: Enabled is an on/off switch for syncing resources.
                       type: boolean
                     selector:
@@ -245,6 +246,8 @@ spec:
                         Selector specifies set of labels of the resources that will be synced, if empty
                        then all resources of the given type will be synced.
                       type: object
+                  required:
+                  - enabled
                   type: object
                 ingresses:
                   default:
@@ -252,6 +255,7 @@ spec:
                   description: Ingresses resources sync configuration.
                   properties:
                     enabled:
+                      default: false
                       description: Enabled is an on/off switch for syncing resources.
                       type: boolean
                     selector:
@@ -261,6 +265,8 @@ spec:
                         Selector specifies set of labels of the resources that will be synced, if empty
                        then all resources of the given type will be synced.
                       type: object
+                  required:
+                  - enabled
                   type: object
                 persistentVolumeClaims:
                   default:
@@ -268,6 +274,7 @@ spec:
                   description: PersistentVolumeClaims resources sync configuration.
                   properties:
                     enabled:
+                      default: true
                       description: Enabled is an on/off switch for syncing resources.
                       type: boolean
                     selector:
@@ -277,6 +284,8 @@ spec:
                         Selector specifies set of labels of the resources that will be synced, if empty
                        then all resources of the given type will be synced.
                       type: object
+                  required:
+                  - enabled
                   type: object
                 priorityClasses:
                   default:
@@ -284,6 +293,7 @@ spec:
                   description: PriorityClasses resources sync configuration.
                   properties:
                     enabled:
+                      default: false
                       description: Enabled is an on/off switch for syncing resources.
                       type: boolean
                     selector:
@@ -293,6 +303,8 @@ spec:
                         Selector specifies set of labels of the resources that will be synced, if empty
                        then all resources of the given type will be synced.
                       type: object
+                  required:
+                  - enabled
                   type: object
                 secrets:
                   default:
@@ -300,6 +312,7 @@ spec:
                   description: Secrets resources sync configuration.
                   properties:
                     enabled:
+                      default: true
                       description: Enabled is an on/off switch for syncing resources.
                       type: boolean
                     selector:
@@ -316,6 +329,7 @@ spec:
                   description: Services resources sync configuration.
                   properties:
                     enabled:
+                      default: true
                       description: Enabled is an on/off switch for syncing resources.
                       type: boolean
                     selector:
@@ -325,6 +339,8 @@ spec:
                         Selector specifies set of labels of the resources that will be synced, if empty
                        then all resources of the given type will be synced.
                       type: object
+                  required:
+                  - enabled
                   type: object
               type: object
             type: object
k3kcli: cluster create command (Go)
@@ -46,6 +46,7 @@ type CreateConfig struct {
     policy               string
     mirrorHostNodes      bool
     customCertsPath      string
+    timeout              time.Duration
 }
 
 func NewClusterCreateCmd(appCtx *AppContext) *cobra.Command {
@@ -141,7 +142,7 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
 
     logrus.Infof("Waiting for cluster to be available..")
 
-    if err := waitForCluster(ctx, client, cluster); err != nil {
+    if err := waitForCluster(ctx, client, cluster, config.timeout); err != nil {
         return fmt.Errorf("failed to wait for cluster to become ready (status: %s): %w", cluster.Status.Phase, err)
     }
 
@@ -210,7 +211,7 @@ func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
     }
 
     if config.customCertsPath != "" {
-        cluster.Spec.CustomCAs = v1beta1.CustomCAs{
+        cluster.Spec.CustomCAs = &v1beta1.CustomCAs{
             Enabled: true,
             Sources: v1beta1.CredentialSources{
                 ClientCA: v1beta1.CredentialSource{
@@ -256,9 +257,8 @@ func env(envSlice []string) []v1.EnvVar {
     return envVars
 }
 
-func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1beta1.Cluster) error {
+func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1beta1.Cluster, timeout time.Duration) error {
     interval := 5 * time.Second
-    timeout := 2 * time.Minute
 
     return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
         key := client.ObjectKeyFromObject(cluster)
k3kcli: create flags (Go)
@@ -2,6 +2,7 @@ package cmds
 
 import (
     "errors"
+    "time"
 
     "github.com/spf13/cobra"
     "k8s.io/apimachinery/pkg/api/resource"
@@ -28,6 +29,7 @@ func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
     cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host")
     cmd.Flags().StringVar(&cfg.policy, "policy", "", "The policy to create the cluster in")
     cmd.Flags().StringVar(&cfg.customCertsPath, "custom-certs", "", "The path for custom certificate directory")
+    cmd.Flags().DurationVar(&cfg.timeout, "timeout", 3*time.Minute, "The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h).")
 }
 
 func validateCreateConfig(cfg *CreateConfig) error {
CLI docs: k3kcli cluster create
@@ -33,6 +33,7 @@ k3kcli cluster create [command options] NAME
     --service-cidr string               service CIDR
     --storage-class-name string         storage class name for dynamic persistence type
     --storage-request-size string       storage size for dynamic persistence type
+    --timeout duration                  The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h). (default 3m0s)
     --token string                      token of the cluster
     --version string                    k3s version
 ```
Generated API docs (crd-ref-docs)
@@ -152,7 +152,7 @@ _Appears in:_
 
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
-| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
+| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
 | `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
 
@@ -208,7 +208,7 @@ _Appears in:_
 
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
-| `enabled` _boolean_ | Enabled toggles this feature on or off. | | |
+| `enabled` _boolean_ | Enabled toggles this feature on or off. | true | |
 | `sources` _[CredentialSources](#credentialsources)_ | Sources defines the sources for all required custom CA certificates. | | |
 
@@ -226,7 +226,7 @@ _Appears in:_
 
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
 | `ingress` _[IngressConfig](#ingressconfig)_ | Ingress specifies options for exposing the API server through an Ingress. | | |
-| `loadbalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
+| `loadBalancer` _[LoadBalancerConfig](#loadbalancerconfig)_ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. | | |
 | `nodePort` _[NodePortConfig](#nodeportconfig)_ | NodePort specifies options for exposing the API server through NodePort. | | |
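Since ingress, loadBalancer and nodePort are mutually exclusive, a manifest using the renamed key sets exactly one of them. A minimal sketch:

```yaml
spec:
  expose:
    loadBalancer: {}   # alternatively ingress: {...} or nodePort: {}, but only one of the three
```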
@@ -260,7 +260,7 @@ _Appears in:_
 
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
-| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
+| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
 | `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
 
@@ -342,7 +342,7 @@ _Appears in:_
 
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
-| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
+| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
 | `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
 
@@ -373,7 +373,7 @@ _Appears in:_
 
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
-| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
+| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | false | |
 | `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
 
@@ -390,7 +390,7 @@ _Appears in:_
 
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
-| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
+| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
 | `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
 
@@ -407,7 +407,7 @@ _Appears in:_
 
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
-| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
+| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | true | |
 | `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
 
@@ -426,7 +426,7 @@ _Appears in:_
 
 | Field | Description | Default | Validation |
 | --- | --- | --- | --- |
 | `services` _[ServiceSyncConfig](#servicesyncconfig)_ | Services resources sync configuration. | \{ enabled:true \} | |
-| `configmaps` _[ConfigMapSyncConfig](#configmapsyncconfig)_ | ConfigMaps resources sync configuration. | \{ enabled:true \} | |
+| `configMaps` _[ConfigMapSyncConfig](#configmapsyncconfig)_ | ConfigMaps resources sync configuration. | \{ enabled:true \} | |
 | `secrets` _[SecretSyncConfig](#secretsyncconfig)_ | Secrets resources sync configuration. | \{ enabled:true \} | |
 | `ingresses` _[IngressSyncConfig](#ingresssyncconfig)_ | Ingresses resources sync configuration. | \{ enabled:false \} | |
 | `persistentVolumeClaims` _[PersistentVolumeClaimSyncConfig](#persistentvolumeclaimsyncconfig)_ | PersistentVolumeClaims resources sync configuration. | \{ enabled:true \} | |
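Putting the table above into practice, a spec.sync block that overrides the defaults would look roughly like this (a sketch; the selector label is illustrative):

```yaml
spec:
  sync:
    configMaps:        # note the camelCase key, renamed from "configmaps"
      enabled: true
      selector:
        app: my-app    # only ConfigMaps with this label are synced; an empty selector syncs all
    ingresses:
      enabled: false   # ingress syncing is off by default
```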
Deleted example: Cluster "example1" (19 lines)
@@ -1,19 +0,0 @@
-apiVersion: k3k.io/v1beta1
-kind: Cluster
-metadata:
-  name: example1
-spec:
-  mode: "shared"
-  servers: 1
-  agents: 3
-  token: test
-  version: v1.26.0-k3s2
-  clusterCIDR: 10.30.0.0/16
-  serviceCIDR: 10.31.0.0/16
-  clusterDNS: 10.30.0.10
-  serverArgs:
-    - "--write-kubeconfig-mode=777"
-  expose:
-    ingress:
-      enabled: true
-      ingressClassName: "nginx"
examples/shared-multiple-servers.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: shared-multiple-servers
spec:
  mode: shared
  servers: 3
  agents: 3
  version: v1.33.1-k3s1
  serverArgs:
    - "--write-kubeconfig-mode=777"
  tlsSANs:
    - myserver.app
  expose:
    nodePort: {}
examples/shared-single-server.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: shared-single-server
spec:
  mode: shared
  servers: 1
  version: v1.33.1-k3s1
  serverArgs:
    - "--write-kubeconfig-mode=777"
  tlsSANs:
    - myserver.app
  expose:
    nodePort: {}
Deleted example: Cluster "single-server" (19 lines)
@@ -1,19 +0,0 @@
-apiVersion: k3k.io/v1beta1
-kind: Cluster
-metadata:
-  name: single-server
-spec:
-  mode: "shared"
-  servers: 1
-  agents: 3
-  token: test
-  version: v1.26.0-k3s2
-  clusterCIDR: 10.30.0.0/16
-  serviceCIDR: 10.31.0.0/16
-  clusterDNS: 10.30.0.10
-  serverArgs:
-    - "--write-kubeconfig-mode=777"
-  expose:
-    ingress:
-      enabled: true
-      ingressClassName: "nginx"
examples/virtual-server.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: virtual-server
spec:
  mode: virtual
  servers: 3
  agents: 3
  version: v1.33.1-k3s1
  tlsSANs:
    - myserver.app
  expose:
    nodePort: {}
VirtualClusterPolicy example
@@ -2,8 +2,8 @@ apiVersion: k3k.io/v1beta1
 kind: VirtualClusterPolicy
 metadata:
   name: policy-example
-# spec:
-#   disableNetworkPolicy: false
-#   allowedMode: "shared"
+spec:
+  allowedMode: shared
+  disableNetworkPolicy: true
 #   podSecurityAdmissionLevel: "baseline"
 #   defaultPriorityClass: "lowpriority"
ConfigMap syncer (Go)
@@ -100,6 +100,10 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
 
     syncedConfigMap := c.translateConfigMap(&virtualConfigMap)
 
+    if err := controllerutil.SetControllerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
+        return reconcile.Result{}, err
+    }
+
     // handle deletion
     if !virtualConfigMap.DeletionTimestamp.IsZero() {
         // deleting the synced configMap if exist
ConfigMap syncer tests (Go)
@@ -40,13 +40,6 @@ var ConfigMapTests = func() {
             GenerateName: "cluster-",
             Namespace:    namespace,
         },
-        Spec: v1beta1.ClusterSpec{
-            Sync: &v1beta1.SyncConfig{
-                ConfigMaps: v1beta1.ConfigMapSyncConfig{
-                    Enabled: true,
-                },
-            },
-        },
     }
     err = hostTestEnv.k8sClient.Create(ctx, &cluster)
     Expect(err).NotTo(HaveOccurred())
Ingress syncer (Go)
@@ -97,6 +97,7 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
     }
 
     syncedIngress := r.ingress(&virtIngress)
+
     if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
         return reconcile.Result{}, err
     }
PVC syncer tests (Go)
@@ -41,13 +41,6 @@ var PVCTests = func() {
             GenerateName: "cluster-",
             Namespace:    namespace,
         },
-        Spec: v1beta1.ClusterSpec{
-            Sync: &v1beta1.SyncConfig{
-                PersistentVolumeClaims: v1beta1.PersistentVolumeClaimSyncConfig{
-                    Enabled: true,
-                },
-            },
-        },
     }
     err = hostTestEnv.k8sClient.Create(ctx, &cluster)
     Expect(err).NotTo(HaveOccurred())
Pod/PVC controller (Go)
@@ -5,6 +5,7 @@ import (
 
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/component-helpers/storage/volume"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
     "sigs.k8s.io/controller-runtime/pkg/manager"
     "sigs.k8s.io/controller-runtime/pkg/predicate"
     "sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -113,9 +114,14 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
         return ctrlruntimeclient.IgnoreNotFound(err)
     }
 
+    pv := r.pseudoPV(&pvc)
+
+    if pod.DeletionTimestamp != nil {
+        return r.handlePodDeletion(ctx, pv)
+    }
+
     log.Info("Creating pseudo Persistent Volume")
 
-    pv := r.pseudoPV(&pvc)
     if err := r.VirtualClient.Create(ctx, pv); err != nil {
         return ctrlruntimeclient.IgnoreAlreadyExists(err)
     }
@@ -188,3 +194,22 @@ func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVo
         },
     }
 }
+
+func (r *PodReconciler) handlePodDeletion(ctx context.Context, pv *v1.PersistentVolume) error {
+    var currentPV v1.PersistentVolume
+    if err := r.VirtualClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(pv), &currentPV); err != nil {
+        return ctrlruntimeclient.IgnoreNotFound(err)
+    }
+
+    pvPatch := currentPV.DeepCopy()
+    pvPatch.Spec.ClaimRef = nil
+    pvPatch.Status.Phase = v1.VolumeReleased
+
+    controllerutil.RemoveFinalizer(pvPatch, "kubernetes.io/pv-protection")
+
+    if err := r.VirtualClient.Status().Update(ctx, pvPatch); err != nil {
+        return err
+    }
+
+    return ctrlruntimeclient.IgnoreNotFound(r.VirtualClient.Delete(ctx, &currentPV))
+}
PriorityClass syncer (Go)
@@ -117,6 +117,10 @@ func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
 
     hostPriorityClass := r.translatePriorityClass(priorityClass)
 
+    if err := controllerutil.SetControllerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
+        return reconcile.Result{}, err
+    }
+
     // handle deletion
     if !priorityClass.DeletionTimestamp.IsZero() {
         // deleting the synced service if exists
Secret syncer (Go)
@@ -100,6 +100,10 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
 
     syncedSecret := s.translateSecret(&virtualSecret)
 
+    if err := controllerutil.SetControllerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
+        return reconcile.Result{}, err
+    }
+
     // handle deletion
     if !virtualSecret.DeletionTimestamp.IsZero() {
         // deleting the synced secret if exist
Secret syncer tests (Go)
@@ -40,13 +40,6 @@ var SecretTests = func() {
             GenerateName: "cluster-",
             Namespace:    namespace,
         },
-        Spec: v1beta1.ClusterSpec{
-            Sync: &v1beta1.SyncConfig{
-                Secrets: v1beta1.SecretSyncConfig{
-                    Enabled: true,
-                },
-            },
-        },
     }
     err = hostTestEnv.k8sClient.Create(ctx, &cluster)
     Expect(err).NotTo(HaveOccurred())
Service syncer (Go)
@@ -75,6 +75,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
     }
 
     syncedService := r.service(&virtService)
+
     if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
         return reconcile.Result{}, err
     }
Service syncer tests (Go)
@@ -41,13 +41,6 @@ var ServiceTests = func() {
             GenerateName: "cluster-",
             Namespace:    namespace,
         },
-        Spec: v1beta1.ClusterSpec{
-            Sync: &v1beta1.SyncConfig{
-                Services: v1beta1.ServiceSyncConfig{
-                    Enabled: true,
-                },
-            },
-        },
     }
     err = hostTestEnv.k8sClient.Create(ctx, &cluster)
     Expect(err).NotTo(HaveOccurred())
k3k-kubelet main.go (Go)
@@ -43,8 +43,8 @@ func main() {
         RunE: run,
     }
 
-    rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
-    rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "json", "Log format (json or console)")
+    rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "", false, "Enable debug logging")
+    rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "text", "Log format (text or json)")
     rootCmd.PersistentFlags().StringVar(&cfg.ClusterName, "cluster-name", "", "Name of the k3k cluster")
     rootCmd.PersistentFlags().StringVar(&cfg.ClusterNamespace, "cluster-namespace", "", "Namespace of the k3k cluster")
     rootCmd.PersistentFlags().StringVar(&cfg.Token, "token", "", "K3S token of the k3k cluster")
main.go (4 lines changed)
@@ -62,8 +62,8 @@ func main() {
         RunE: run,
     }
 
-    rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Debug level logging")
-    rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "json", "Log format (json or console)")
+    rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "", false, "Debug level logging")
+    rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "text", "Log format (text or json)")
    rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig path")
    rootCmd.PersistentFlags().StringVar(&config.ClusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
    rootCmd.PersistentFlags().StringVar(&config.SharedAgentImage, "agent-shared-image", "rancher/k3k-kubelet", "K3K Virtual Kubelet image")
Cluster API types (Go)
@@ -103,7 +103,7 @@ type ClusterSpec struct {
     // Expose specifies options for exposing the API server.
     // By default, it's only exposed as a ClusterIP.
     //
-    // +kubebuilder:validation:XValidation:rule="[has(self.ingress), has(self.loadbalancer), has(self.nodePort)].filter(x, x).size() <= 1",message="ingress, loadbalancer and nodePort are mutually exclusive; only one can be set"
+    // +kubebuilder:validation:XValidation:rule="[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x, x).size() <= 1",message="ingress, loadbalancer and nodePort are mutually exclusive; only one can be set"
     // +optional
     Expose *ExposeConfig `json:"expose,omitempty"`
 
@@ -176,7 +176,7 @@ type ClusterSpec struct {
     // CustomCAs specifies the cert/key pairs for custom CA certificates.
     //
     // +optional
-    CustomCAs CustomCAs `json:"customCAs,omitempty"`
+    CustomCAs *CustomCAs `json:"customCAs,omitempty"`
 
     // Sync specifies the resources types that will be synced from virtual cluster to host cluster.
     //
@@ -190,32 +190,40 @@ type SyncConfig struct {
     // Services resources sync configuration.
     //
     // +kubebuilder:default={"enabled": true}
-    Services ServiceSyncConfig `json:"services,omitempty"`
+    // +optional
+    Services ServiceSyncConfig `json:"services"`
     // ConfigMaps resources sync configuration.
     //
     // +kubebuilder:default={"enabled": true}
-    ConfigMaps ConfigMapSyncConfig `json:"configmaps,omitempty"`
+    // +optional
+    ConfigMaps ConfigMapSyncConfig `json:"configMaps"`
     // Secrets resources sync configuration.
     //
     // +kubebuilder:default={"enabled": true}
-    Secrets SecretSyncConfig `json:"secrets,omitempty"`
+    // +optional
+    Secrets SecretSyncConfig `json:"secrets"`
     // Ingresses resources sync configuration.
     //
     // +kubebuilder:default={"enabled": false}
-    Ingresses IngressSyncConfig `json:"ingresses,omitempty"`
+    // +optional
+    Ingresses IngressSyncConfig `json:"ingresses"`
     // PersistentVolumeClaims resources sync configuration.
     //
     // +kubebuilder:default={"enabled": true}
-    PersistentVolumeClaims PersistentVolumeClaimSyncConfig `json:"persistentVolumeClaims,omitempty"`
+    // +optional
+    PersistentVolumeClaims PersistentVolumeClaimSyncConfig `json:"persistentVolumeClaims"`
     // PriorityClasses resources sync configuration.
     //
     // +kubebuilder:default={"enabled": false}
-    PriorityClasses PriorityClassSyncConfig `json:"priorityClasses,omitempty"`
+    // +optional
+    PriorityClasses PriorityClassSyncConfig `json:"priorityClasses"`
 }
 
 // SecretSyncConfig specifies the sync options for services.
 type SecretSyncConfig struct {
     // Enabled is an on/off switch for syncing resources.
     //
     // +kubebuilder:default=true
     // +optional
     Enabled bool `json:"enabled,omitempty"`
 
@@ -229,8 +237,10 @@ type SecretSyncConfig struct {
 // ServiceSyncConfig specifies the sync options for services.
 type ServiceSyncConfig struct {
     // Enabled is an on/off switch for syncing resources.
-    // +optional
-    Enabled bool `json:"enabled,omitempty"`
+    //
+    // +kubebuilder:default=true
+    // +required
+    Enabled bool `json:"enabled"`
 
     // Selector specifies set of labels of the resources that will be synced, if empty
     // then all resources of the given type will be synced.
@@ -242,8 +252,10 @@ type ServiceSyncConfig struct {
 // ConfigMapSyncConfig specifies the sync options for services.
 type ConfigMapSyncConfig struct {
     // Enabled is an on/off switch for syncing resources.
-    // +optional
-    Enabled bool `json:"enabled,omitempty"`
+    //
+    // +kubebuilder:default=true
+    // +required
+    Enabled bool `json:"enabled"`
 
     // Selector specifies set of labels of the resources that will be synced, if empty
     // then all resources of the given type will be synced.
@@ -255,8 +267,10 @@ type ConfigMapSyncConfig struct {
 // IngressSyncConfig specifies the sync options for services.
 type IngressSyncConfig struct {
     // Enabled is an on/off switch for syncing resources.
-    // +optional
-    Enabled bool `json:"enabled,omitempty"`
+    //
+    // +kubebuilder:default=false
+    // +required
+    Enabled bool `json:"enabled"`
 
     // Selector specifies set of labels of the resources that will be synced, if empty
     // then all resources of the given type will be synced.
@@ -268,8 +282,10 @@ type IngressSyncConfig struct {
 // PersistentVolumeClaimSyncConfig specifies the sync options for services.
 type PersistentVolumeClaimSyncConfig struct {
     // Enabled is an on/off switch for syncing resources.
-    // +optional
-    Enabled bool `json:"enabled,omitempty"`
+    //
+    // +kubebuilder:default=true
+    // +required
+    Enabled bool `json:"enabled"`
 
     // Selector specifies set of labels of the resources that will be synced, if empty
     // then all resources of the given type will be synced.
@@ -281,8 +297,10 @@ type PersistentVolumeClaimSyncConfig struct {
 // PriorityClassSyncConfig specifies the sync options for services.
 type PriorityClassSyncConfig struct {
     // Enabled is an on/off switch for syncing resources.
-    // +optional
-    Enabled bool `json:"enabled,omitempty"`
+    //
+    // +kubebuilder:default=false
+    // +required
+    Enabled bool `json:"enabled"`
 
     // Selector specifies set of labels of the resources that will be synced, if empty
     // then all resources of the given type will be synced.
@@ -358,7 +376,7 @@ type ExposeConfig struct {
     // LoadBalancer specifies options for exposing the API server through a LoadBalancer service.
     //
     // +optional
-    LoadBalancer *LoadBalancerConfig `json:"loadbalancer,omitempty"`
+    LoadBalancer *LoadBalancerConfig `json:"loadBalancer,omitempty"`
 
     // NodePort specifies options for exposing the API server through NodePort.
     //
@@ -416,32 +434,34 @@ type NodePortConfig struct {
 // CustomCAs specifies the cert/key pairs for custom CA certificates.
 type CustomCAs struct {
     // Enabled toggles this feature on or off.
-    Enabled bool `json:"enabled,omitempty"`
+    //
+    // +kubebuilder:default=true
+    Enabled bool `json:"enabled"`
 
     // Sources defines the sources for all required custom CA certificates.
-    Sources CredentialSources `json:"sources,omitempty"`
+    Sources CredentialSources `json:"sources"`
 }
 
 // CredentialSources lists all the required credentials, including both
 // TLS key pairs and single signing keys.
 type CredentialSources struct {
     // ServerCA specifies the server-ca cert/key pair.
-    ServerCA CredentialSource `json:"serverCA,omitempty"`
+    ServerCA CredentialSource `json:"serverCA"`
 
     // ClientCA specifies the client-ca cert/key pair.
-    ClientCA CredentialSource `json:"clientCA,omitempty"`
+    ClientCA CredentialSource `json:"clientCA"`
 
     // RequestHeaderCA specifies the request-header-ca cert/key pair.
-    RequestHeaderCA CredentialSource `json:"requestHeaderCA,omitempty"`
+    RequestHeaderCA CredentialSource `json:"requestHeaderCA"`
 
     // ETCDServerCA specifies the etcd-server-ca cert/key pair.
-    ETCDServerCA CredentialSource `json:"etcdServerCA,omitempty"`
+    ETCDServerCA CredentialSource `json:"etcdServerCA"`
 
     // ETCDPeerCA specifies the etcd-peer-ca cert/key pair.
-    ETCDPeerCA CredentialSource `json:"etcdPeerCA,omitempty"`
+    ETCDPeerCA CredentialSource `json:"etcdPeerCA"`
 
     // ServiceAccountToken specifies the service-account-token key.
-    ServiceAccountToken CredentialSource `json:"serviceAccountToken,omitempty"`
+    ServiceAccountToken CredentialSource `json:"serviceAccountToken"`
 }
 
 // CredentialSource defines where to get a credential from.
@@ -451,8 +471,7 @@ type CredentialSource struct {
     // The controller expects specific keys inside based on the credential type:
     // - For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'.
     // - For ServiceAccountTokenKey: 'tls.key'.
-    // +optional
-    SecretName string `json:"secretName,omitempty"`
+    SecretName string `json:"secretName"`
 }
 
 // ClusterStatus reflects the observed state of a Cluster.
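With CustomCAs now a pointer and every credential source required, a populated customCAs block in a Cluster manifest would look roughly like this (a sketch; the Secret names are illustrative):

```yaml
spec:
  customCAs:
    enabled: true
    sources:
      serverCA:
        secretName: my-server-ca           # TLS pair: keys 'tls.crt' and 'tls.key'
      clientCA:
        secretName: my-client-ca
      requestHeaderCA:
        secretName: my-request-header-ca
      etcdServerCA:
        secretName: my-etcd-server-ca
      etcdPeerCA:
        secretName: my-etcd-peer-ca
      serviceAccountToken:
        secretName: my-sa-token            # signing key only: key 'tls.key'
```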
zz_generated.deepcopy.go
@@ -163,7 +163,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
             (*out)[key] = val.DeepCopy()
         }
     }
-    out.CustomCAs = in.CustomCAs
+    if in.CustomCAs != nil {
+        in, out := &in.CustomCAs, &out.CustomCAs
+        *out = new(CustomCAs)
+        **out = **in
+    }
     if in.Sync != nil {
         in, out := &in.Sync, &out.Sync
         *out = new(SyncConfig)
Cluster config helper: ensureObject (Go)
@@ -42,11 +42,8 @@ func configSecretName(clusterName string) string {
 }
 
 func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object) error {
-    log := ctrl.LoggerFrom(ctx)
-
     key := ctrlruntimeclient.ObjectKeyFromObject(obj)
-
-    log.Info(fmt.Sprintf("ensuring %T", obj), "key", key)
+    log := ctrl.LoggerFrom(ctx).WithValues("key", key)
 
     if err := controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme); err != nil {
         return err
@@ -54,11 +51,15 @@ func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object
 
     if err := cfg.client.Create(ctx, obj); err != nil {
         if apierrors.IsAlreadyExists(err) {
+            log.V(1).Info(fmt.Sprintf("Resource %T already exists, updating.", obj))
+
             return cfg.client.Update(ctx, obj)
         }
 
         return err
     }
 
+    log.V(1).Info(fmt.Sprintf("Creating %T.", obj))
+
     return nil
 }
@@ -161,10 +161,8 @@ func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
|
||||
}
|
||||
|
||||
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", req.NamespacedName)
|
||||
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
|
||||
|
||||
log.Info("reconciling cluster")
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
log.Info("Reconciling Cluster")
|
||||
|
||||
var cluster v1beta1.Cluster
|
||||
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
|
||||
@@ -178,6 +176,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
|
||||
// Set initial status if not already set
|
||||
if cluster.Status.Phase == "" || cluster.Status.Phase == v1beta1.ClusterUnknown {
|
||||
log.V(1).Info("Updating Cluster status phase")
|
||||
|
||||
cluster.Status.Phase = v1beta1.ClusterProvisioning
|
||||
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
|
||||
Type: ConditionReady,
|
||||
@@ -195,6 +195,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
|
||||
// add finalizer
|
||||
if controllerutil.AddFinalizer(&cluster, clusterFinalizerName) {
|
||||
log.V(1).Info("Updating Cluster adding finalizer")
|
||||
|
||||
if err := c.Client.Update(ctx, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -207,6 +209,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
reconcilerErr := c.reconcileCluster(ctx, &cluster)
|
||||
|
||||
if !equality.Semantic.DeepEqual(orig.Status, cluster.Status) {
|
||||
log.Info("Updating Cluster status")
|
||||
|
||||
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -215,7 +219,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
// if there was an error during the reconciliation, return
|
||||
if reconcilerErr != nil {
|
||||
if errors.Is(reconcilerErr, bootstrap.ErrServerNotReady) {
|
||||
log.Info("server not ready, requeueing")
|
||||
log.V(1).Info("Server not ready, requeueing")
|
||||
return reconcile.Result{RequeueAfter: time.Second * 10}, nil
|
||||
}
|
||||
|
||||
@@ -224,6 +228,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
|
||||
// update Cluster if needed
|
||||
if !equality.Semantic.DeepEqual(orig.Spec, cluster.Spec) {
|
||||
log.Info("Updating Cluster")
|
||||
|
||||
if err := c.Client.Update(ctx, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -234,7 +240,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
|
||||
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1beta1.Cluster) error {
|
||||
err := c.reconcile(ctx, cluster)
|
||||
c.updateStatus(cluster, err)
|
||||
c.updateStatus(ctx, cluster, err)
|
||||
|
||||
return err
|
||||
}
|
||||
@@ -264,7 +270,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
|
||||
// if the Version is not specified we will try to use the same Kubernetes version of the host.
|
||||
// This version is stored in the Status object, and it will not be updated if already set.
|
||||
if cluster.Spec.Version == "" && cluster.Status.HostVersion == "" {
|
||||
log.Info("cluster version not set")
|
||||
log.V(1).Info("Cluster version not set. Using host version.")
|
||||
|
||||
hostVersion, err := c.DiscoveryClient.ServerVersion()
|
||||
if err != nil {
|
||||
@@ -295,7 +301,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus

if cluster.Status.ServiceCIDR == "" {
// in shared mode try to lookup the serviceCIDR
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
log.Info("looking up Service CIDR for shared mode")
log.V(1).Info("Looking up Service CIDR for shared mode")

cluster.Status.ServiceCIDR, err = c.lookupServiceCIDR(ctx)
if err != nil {

@@ -307,7 +313,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus

// in virtual mode assign a default serviceCIDR
if cluster.Spec.Mode == v1beta1.VirtualClusterMode {
log.Info("assign default service CIDR for virtual mode")
log.V(1).Info("assign default service CIDR for virtual mode")

cluster.Status.ServiceCIDR = defaultVirtualServiceCIDR
}

@@ -354,7 +360,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus

// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring bootstrap secret")
log.V(1).Info("Ensuring bootstrap secret")

bootstrapData, err := bootstrap.GenerateBootstrapData(ctx, cluster, serviceIP, token)
if err != nil {

@@ -386,7 +392,7 @@ func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *

// ensureKubeconfigSecret will create or update the Secret containing the kubeconfig data from the k3s server
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP string, port int) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring kubeconfig secret")
log.V(1).Info("Ensuring Kubeconfig Secret")

adminKubeconfig := kubeconfig.New()

@@ -460,7 +466,7 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v

func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring network policy")
log.V(1).Info("Ensuring network policy")

networkPolicyName := controller.SafeConcatNameWithPrefix(cluster.Name)

@@ -544,7 +550,7 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1

key := client.ObjectKeyFromObject(currentNetworkPolicy)
if result != controllerutil.OperationResultNone {
log.Info("cluster network policy updated", "key", key, "result", result)
log.V(1).Info("Cluster network policy updated", "key", key, "result", result)
}

return nil

@@ -552,7 +558,7 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1

func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1beta1.Cluster) (*v1.Service, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster service")
log.V(1).Info("Ensuring Cluster Service")

expectedService := server.Service(cluster)
currentService := expectedService.DeepCopy()

@@ -572,7 +578,7 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v

key := client.ObjectKeyFromObject(currentService)
if result != controllerutil.OperationResultNone {
log.Info("cluster service updated", "key", key, "result", result)
log.V(1).Info("Cluster service updated", "key", key, "result", result)
}

return currentService, nil

@@ -580,7 +586,7 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v

func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster ingress")
log.V(1).Info("Ensuring cluster ingress")

expectedServerIngress := server.Ingress(ctx, cluster)

@@ -608,7 +614,7 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.

key := client.ObjectKeyFromObject(currentServerIngress)
if result != controllerutil.OperationResultNone {
log.Info("cluster ingress updated", "key", key, "result", result)
log.V(1).Info("Cluster ingress updated", "key", key, "result", result)
}

return nil

@@ -650,7 +656,7 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1beta1.Cluster

if result != controllerutil.OperationResultNone {
key := client.ObjectKeyFromObject(currentServerStatefulSet)
log.Info("ensuring serverStatefulSet", "key", key, "result", result)
log.V(1).Info("Ensuring server StatefulSet", "key", key, "result", result)
}

return err

@@ -730,8 +736,8 @@ func (c *ClusterReconciler) validate(cluster *v1beta1.Cluster, policy v1beta1.Vi

return fmt.Errorf("%w: mode %q is not allowed by the policy %q", ErrClusterValidation, cluster.Spec.Mode, policy.Name)
}

if cluster.Spec.CustomCAs.Enabled {
if err := c.validateCustomCACerts(cluster); err != nil {
if cluster.Spec.CustomCAs != nil && cluster.Spec.CustomCAs.Enabled {
if err := c.validateCustomCACerts(cluster.Spec.CustomCAs.Sources); err != nil {
return fmt.Errorf("%w: %w", ErrClusterValidation, err)
}
}
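The hunk above is one piece of a change that runs through this whole diff: `Cluster.Spec.CustomCAs` became a pointer, so every consumer now nil-checks before dereferencing. A minimal sketch of why the pointer form is useful (types abbreviated for illustration, not the actual v1beta1 definitions):

```go
package main

import "fmt"

// Abbreviated stand-ins for the v1beta1 types; the field names follow the
// diff, everything else is assumed.
type CredentialSources struct{ ServerCA struct{ SecretName string } }

type CustomCAs struct {
	Enabled bool
	Sources CredentialSources
}

type ClusterSpec struct {
	// A pointer distinguishes "not configured" (nil) from
	// "configured but disabled" (&CustomCAs{Enabled: false}).
	CustomCAs *CustomCAs
}

func main() {
	spec := ClusterSpec{}

	// The guard added throughout the diff: nil-check before Enabled.
	if spec.CustomCAs != nil && spec.CustomCAs.Enabled {
		fmt.Println("validating", spec.CustomCAs.Sources.ServerCA.SecretName)
	} else {
		fmt.Println("custom CAs not configured")
	}
}
```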
@@ -753,7 +759,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro

// Try to look for the serviceCIDR creating a failing service.
// The error should contain the expected serviceCIDR

log.Info("looking up serviceCIDR from a failing service creation")
log.V(1).Info("Looking up Service CIDR from a failing service creation")

failingSvc := v1.Service{
ObjectMeta: metav1.ObjectMeta{Name: "fail", Namespace: "default"},

@@ -765,7 +771,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro

if len(splittedErrMsg) > 1 {
serviceCIDR := strings.TrimSpace(splittedErrMsg[1])
log.Info("found serviceCIDR from failing service creation: " + serviceCIDR)
log.V(1).Info("Found Service CIDR from failing service creation: " + serviceCIDR)

// validate serviceCIDR
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)

@@ -779,7 +785,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
// Try to look for the kube-apiserver Pod, and look for the '--service-cluster-ip-range' flag.

log.Info("looking up serviceCIDR from kube-apiserver pod")
log.V(1).Info("Looking up Service CIDR from kube-apiserver pod")

matchingLabels := client.MatchingLabels(map[string]string{
"component": "kube-apiserver",

@@ -802,12 +808,12 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro

for _, arg := range apiServerArgs {
if strings.HasPrefix(arg, "--service-cluster-ip-range=") {
serviceCIDR := strings.TrimPrefix(arg, "--service-cluster-ip-range=")
log.Info("found serviceCIDR from kube-apiserver pod: " + serviceCIDR)
log.V(1).Info("Found Service CIDR from kube-apiserver pod: " + serviceCIDR)

// validate serviceCIDR
_, serviceCIDRAddr, err := net.ParseCIDR(serviceCIDR)
if err != nil {
log.Error(err, "serviceCIDR is not valid")
log.Error(err, "Service CIDR is not valid")
break
}

@@ -822,8 +828,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro

}
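The first lookup strategy above is a well-known trick: creating a Service with a deliberately out-of-range ClusterIP makes the API server reject it with an error message that embeds the configured service CIDR. A minimal standalone sketch of the same idea, not k3k's actual implementation (the error-message marker text is assumed to match what current API servers produce):

```go
package cidrlookup

import (
	"context"
	"fmt"
	"net"
	"strings"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// lookupServiceCIDR provokes a validation error and parses the CIDR out of it.
func lookupServiceCIDR(ctx context.Context, clientset kubernetes.Interface) (string, error) {
	// "1.1.1.1" is almost certainly outside the service range, so the
	// create fails with something like:
	// "... The range of valid IPs is 10.43.0.0/16".
	failingSvc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "fail", Namespace: "default"},
		Spec: corev1.ServiceSpec{
			ClusterIP: "1.1.1.1",
			Ports:     []corev1.ServicePort{{Port: 443}},
		},
	}

	_, err := clientset.CoreV1().Services("default").Create(ctx, failingSvc, metav1.CreateOptions{})
	if err == nil {
		return "", fmt.Errorf("expected service creation to fail")
	}

	// The CIDR follows the fixed marker text in the error message.
	parts := strings.Split(err.Error(), "The range of valid IPs is ")
	if len(parts) < 2 {
		return "", fmt.Errorf("unexpected error message: %w", err)
	}

	serviceCIDR := strings.TrimSpace(parts[1])
	if _, _, perr := net.ParseCIDR(serviceCIDR); perr != nil {
		return "", perr
	}

	return serviceCIDR, nil
}
```

The kube-apiserver fallback in the same function is the complementary path for clusters where the failing-service message does not match: it lists pods labeled `component=kube-apiserver` and parses the `--service-cluster-ip-range=` flag instead.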
// validateCustomCACerts will make sure that all the cert secrets exist
func (c *ClusterReconciler) validateCustomCACerts(cluster *v1beta1.Cluster) error {
credentialSources := cluster.Spec.CustomCAs.Sources
func (c *ClusterReconciler) validateCustomCACerts(credentialSources v1beta1.CredentialSources) error {
if credentialSources.ClientCA.SecretName == "" ||
credentialSources.ServerCA.SecretName == "" ||
credentialSources.ETCDPeerCA.SecretName == "" ||

@@ -12,7 +12,9 @@ import (

"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

coordinationv1 "k8s.io/api/coordination/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"

@@ -23,7 +25,7 @@ import (

func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta1.Cluster) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("finalizing Cluster")
log.V(1).Info("Deleting Cluster")

// Set the Terminating phase and condition
cluster.Status.Phase = v1beta1.ClusterTerminating

@@ -40,7 +42,7 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta

// Deallocate ports for kubelet and webhook if used
if cluster.Spec.Mode == v1beta1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
log.Info("dellocating ports for kubelet and webhook")
|
||||
log.V(1).Info("dellocating ports for kubelet and webhook")

if err := c.PortAllocator.DeallocateKubeletPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.KubeletPort); err != nil {
return reconcile.Result{}, err

@@ -51,8 +53,25 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta

}
}

// delete API server lease
lease := &coordinationv1.Lease{
TypeMeta: metav1.TypeMeta{
Kind: "Lease",
APIVersion: "coordination.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name,
Namespace: cluster.Namespace,
},
}
if err := c.Client.Delete(ctx, lease); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}

// Remove finalizer from the cluster and update it only when all resources are cleaned up
if controllerutil.RemoveFinalizer(cluster, clusterFinalizerName) {
log.Info("Deleting Cluster removing finalizer")

if err := c.Client.Update(ctx, cluster); err != nil {
return reconcile.Result{}, err
}

@@ -62,6 +81,9 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta

}

func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Unbinding ClusterRoles")

clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}

var err error
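The finalizer handling in finalizeCluster follows the standard controller-runtime idiom: AddFinalizer/RemoveFinalizer return true only when they actually mutated the object, so the controller issues an Update only when needed, and cleanup (here: port deallocation and lease deletion) runs before the finalizer is released. A generic sketch of that idiom (names are assumptions, not k3k's code):

```go
package finalizers

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const finalizerName = "example.io/finalizer" // assumed name, not k3k's

// reconcileWithFinalizer sketches the add/cleanup/remove dance; "cleanup"
// stands in for the actual teardown (lease deletion, port deallocation).
func reconcileWithFinalizer(ctx context.Context, c client.Client, obj client.Object, cleanup func(context.Context) error) error {
	if obj.GetDeletionTimestamp().IsZero() {
		// Object is live: make sure the finalizer is present.
		// AddFinalizer reports whether it changed the object.
		if controllerutil.AddFinalizer(obj, finalizerName) {
			return c.Update(ctx, obj)
		}

		return nil
	}

	// Object is being deleted: run cleanup first, then release it.
	if err := cleanup(ctx); err != nil {
		return err
	}

	if controllerutil.RemoveFinalizer(obj, finalizerName) {
		return c.Update(ctx, obj)
	}

	return nil
}
```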
@@ -53,6 +53,19 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
Expect(cluster.Spec.Version).To(BeEmpty())

Expect(cluster.Spec.CustomCAs).To(BeNil())

// sync
// enabled by default
Expect(cluster.Spec.Sync).To(Not(BeNil()))
Expect(cluster.Spec.Sync.ConfigMaps.Enabled).To(BeTrue())
Expect(cluster.Spec.Sync.PersistentVolumeClaims.Enabled).To(BeTrue())
Expect(cluster.Spec.Sync.Secrets.Enabled).To(BeTrue())
Expect(cluster.Spec.Sync.Services.Enabled).To(BeTrue())
// disabled by default
Expect(cluster.Spec.Sync.Ingresses.Enabled).To(BeFalse())
Expect(cluster.Spec.Sync.PriorityClasses.Enabled).To(BeFalse())

Expect(cluster.Spec.Persistence.Type).To(Equal(v1beta1.DynamicPersistenceMode))
Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("2G"))
@@ -9,7 +9,6 @@ import (

"sigs.k8s.io/controller-runtime/pkg/reconcile"

v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

@@ -44,14 +43,10 @@ func AddPodController(ctx context.Context, mgr manager.Manager, maxConcurrentRec

func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling pod")
log.V(1).Info("Reconciling Pod")

var pod v1.Pod
if err := r.Client.Get(ctx, req.NamespacedName, &pod); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}

return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}

@@ -74,6 +69,8 @@ func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r

},
}

log.V(1).Info("Deleting Virtual Pod", "name", virtName, "namespace", virtNamespace)

return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(virtualClient.Delete(ctx, &virtPod))
}
@@ -13,6 +13,7 @@ import (

"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"

@@ -268,6 +269,10 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)

if s.cluster.Spec.Persistence.Type == v1beta1.DynamicPersistenceMode {
persistent = true
pvClaim = s.setupDynamicPersistence()

if err := controllerutil.SetControllerReference(s.cluster, &pvClaim, s.client.Scheme()); err != nil {
return nil, err
}
}

var (

@@ -330,7 +335,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)

volumeMounts = append(volumeMounts, volumeMount)
}

if s.cluster.Spec.CustomCAs.Enabled {
if s.cluster.Spec.CustomCAs != nil && s.cluster.Spec.CustomCAs.Enabled {
vols, mounts, err := s.loadCACertBundle(ctx)
if err != nil {
return nil, err

@@ -434,6 +439,10 @@ func (s *Server) setupStartCommand() (string, error) {

}

func (s *Server) loadCACertBundle(ctx context.Context) ([]v1.Volume, []v1.VolumeMount, error) {
if s.cluster.Spec.CustomCAs == nil {
return nil, nil, fmt.Errorf("customCAs not found")
}

customCerts := s.cluster.Spec.CustomCAs.Sources
caCertMap := map[string]string{
"server-ca": customCerts.ServerCA.SecretName,
@@ -39,7 +39,7 @@ func AddServiceController(ctx context.Context, mgr manager.Manager, maxConcurren

func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring service status to virtual cluster")
log.V(1).Info("Reconciling Service")

var hostService v1.Service
if err := r.HostClient.Get(ctx, req.NamespacedName, &hostService); err != nil {

@@ -53,7 +53,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request

virtualServiceNamespace, virtualServiceNamespaceFound := hostService.Annotations[translate.ResourceNamespaceAnnotation]

if !virtualServiceNameFound || !virtualServiceNamespaceFound {
log.V(1).Info(fmt.Sprintf("service %s/%s does not have virtual service annotations, skipping", hostService.Namespace, hostService.Name))
log.V(1).Info(fmt.Sprintf("Service %s/%s does not have virtual service annotations, skipping", hostService.Namespace, hostService.Name))
return reconcile.Result{}, nil
}

@@ -80,7 +80,10 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request

}

if !equality.Semantic.DeepEqual(virtualService.Status.LoadBalancer, hostService.Status.LoadBalancer) {
log.V(1).Info("Updating Virtual Service Status", "name", virtualServiceName, "namespace", virtualServiceNamespace)

virtualService.Status.LoadBalancer = hostService.Status.LoadBalancer

if err := virtualClient.Status().Update(ctx, &virtualService); err != nil {
return reconcile.Result{}, err
}
@@ -62,7 +62,7 @@ func AddStatefulSetController(ctx context.Context, mgr manager.Manager, maxConcu

func (p *StatefulSetReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling statefulset")
log.Info("Reconciling StatefulSet")

var sts apps.StatefulSet
if err := p.Client.Get(ctx, req.NamespacedName, &sts); err != nil {

@@ -116,10 +116,12 @@ func (p *StatefulSetReconciler) Reconcile(ctx context.Context, req reconcile.Req

func (p *StatefulSetReconciler) handleServerPod(ctx context.Context, cluster v1beta1.Cluster, pod *v1.Pod) error {
log := ctrl.LoggerFrom(ctx)
log.Info("handling server pod")
log.V(1).Info("Handling Server Pod")

if pod.DeletionTimestamp.IsZero() {
if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
log.V(1).Info("Server Pod is being deleted. Removing finalizer", "pod", pod.Name, "namespace", pod.Namespace)

return p.Client.Update(ctx, pod)
}

@@ -131,6 +133,8 @@ func (p *StatefulSetReconciler) handleServerPod(ctx context.Context, cluster v1b

// check if cluster is deleted then remove the finalizer from the pod
if cluster.Name == "" {
if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
log.V(1).Info("Cluster was deleted. Deleting Server Pod removing finalizer", "pod", pod.Name, "namespace", pod.Namespace)

if err := p.Client.Update(ctx, pod); err != nil {
return err
}

@@ -161,6 +165,8 @@ func (p *StatefulSetReconciler) handleServerPod(ctx context.Context, cluster v1b

// remove our finalizer from the list and update it.
if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
log.V(1).Info("Deleting Server Pod removing finalizer", "pod", pod.Name, "namespace", pod.Namespace)

if err := p.Client.Update(ctx, pod); err != nil {
return err
}

@@ -171,7 +177,7 @@ func (p *StatefulSetReconciler) handleServerPod(ctx context.Context, cluster v1b

func (p *StatefulSetReconciler) getETCDTLS(ctx context.Context, cluster *v1beta1.Cluster) (*tls.Config, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("generating etcd TLS client certificate", "cluster", cluster)
log.V(1).Info("Generating ETCD TLS client certificate", "cluster", cluster)

token, err := p.clusterToken(ctx, cluster)
if err != nil {

@@ -219,7 +225,7 @@ func (p *StatefulSetReconciler) getETCDTLS(ctx context.Context, cluster *v1beta1

// removePeer removes a peer from the cluster. The peer name and IP address must both match.
func removePeer(ctx context.Context, client *clientv3.Client, name, address string) error {
log := ctrl.LoggerFrom(ctx)
log.Info("removing peer from cluster", "name", name, "address", address)
log.V(1).Info("Removing peer from cluster", "name", name, "address", address)

ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
defer cancel()

@@ -241,7 +247,7 @@ func removePeer(ctx context.Context, client *clientv3.Client, name, address stri

}

if u.Hostname() == address {
log.Info("removing member from etcd", "name", member.Name, "id", member.ID, "address", address)
log.V(1).Info("Removing member from ETCD", "name", member.Name, "id", member.ID, "address", address)

_, err := client.MemberRemove(ctx, member.ID)
if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
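For context on removePeer: it walks the etcd member list, matches a member by the hostname in its peer URLs, and tolerates "member not found" so removal stays idempotent. A minimal sketch of the same flow with the etcd clientv3 API (the timeout value is an assumption; k3k uses its own memberRemovalTimeout):

```go
package etcdutil

import (
	"context"
	"errors"
	"net/url"
	"time"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	clientv3 "go.etcd.io/etcd/client/v3"
)

// removePeerSketch removes the etcd member whose peer URL hostname matches
// address. Removing an already-gone member is treated as success.
func removePeerSketch(ctx context.Context, cli *clientv3.Client, address string) error {
	ctx, cancel := context.WithTimeout(ctx, time.Minute) // assumed timeout
	defer cancel()

	members, err := cli.MemberList(ctx)
	if err != nil {
		return err
	}

	for _, member := range members.Members {
		for _, peerURL := range member.PeerURLs {
			u, err := url.Parse(peerURL)
			if err != nil {
				return err
			}

			if u.Hostname() == address {
				if _, err := cli.MemberRemove(ctx, member.ID); err != nil &&
					!errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
					return err
				}

				return nil
			}
		}
	}

	return nil
}
```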
@@ -280,6 +286,8 @@ func (p *StatefulSetReconciler) clusterToken(ctx context.Context, cluster *v1bet

}

func (p *StatefulSetReconciler) handleDeletion(ctx context.Context, sts *apps.StatefulSet) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx)

podList, err := p.listPods(ctx, sts)
if err != nil {
return reconcile.Result{}, err

@@ -287,6 +295,8 @@ func (p *StatefulSetReconciler) handleDeletion(ctx context.Context, sts *apps.St

for _, pod := range podList.Items {
if controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName) {
log.V(1).Info("Updating Server Pod removing finalizer", "name", pod.Name, "namespace", pod.Namespace)

if err := p.Client.Update(ctx, &pod); err != nil {
return reconcile.Result{}, err
}

@@ -1,12 +1,14 @@

package cluster

import (
"context"
"errors"

"k8s.io/apimachinery/pkg/api/meta"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"

"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"

@@ -24,7 +26,10 @@ const (

ReasonTerminating = "Terminating"
)

func (c *ClusterReconciler) updateStatus(cluster *v1beta1.Cluster, reconcileErr error) {
func (c *ClusterReconciler) updateStatus(ctx context.Context, cluster *v1beta1.Cluster, reconcileErr error) {
log := ctrl.LoggerFrom(ctx)
log.V(1).Info("Updating Cluster Conditions")

if !cluster.DeletionTimestamp.IsZero() {
cluster.Status.Phase = v1beta1.ClusterTerminating
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
@@ -62,7 +62,7 @@ func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1be

return string(tokenSecret.Data["token"]), nil
}

log.Info("Token secret is not specified, creating a random token")
log.V(1).Info("Token secret is not specified, creating a random token")

token, err := random(16)
if err != nil {

@@ -77,7 +77,7 @@ func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1be

})

if result != controllerutil.OperationResultNone {
log.Info("ensuring tokenSecret", "key", key, "result", result)
log.V(1).Info("Ensuring tokenSecret", "key", key, "result", result)
}

return token, err
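The random(16) helper itself is not part of this diff. A typical implementation of such a token helper draws bytes from crypto/rand and hex-encodes them; a sketch under that assumption:

```go
package token

import (
	"crypto/rand"
	"encoding/hex"
)

// random returns a hex string built from size cryptographically random
// bytes (so the resulting token is 2*size characters long). This mirrors
// what a helper like random(16) plausibly does; the real implementation
// is not shown in this diff.
func random(size int) (string, error) {
	buf := make([]byte, size)

	if _, err := rand.Read(buf); err != nil {
		return "", err
	}

	return hex.EncodeToString(buf), nil
}
```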
@@ -17,7 +17,7 @@ import (

// reconcileNamespacePodSecurityLabels will update the labels of the namespace to reconcile the PSA level specified in the VirtualClusterPolicy
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1beta1.VirtualClusterPolicy) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling PSA labels")
log.V(1).Info("Reconciling PSA labels")

// cleanup of old labels
delete(namespace.Labels, "pod-security.kubernetes.io/enforce")

@@ -44,7 +44,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx

// deleting the resources in them with the "app.kubernetes.io/managed-by=k3k-policy-controller" label
func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context) error {
log := ctrl.LoggerFrom(ctx)
log.Info("deleting resources")
log.V(1).Info("Cleanup Namespace resources")

var namespaces v1.NamespaceList
if err := c.Client.List(ctx, &namespaces); err != nil {

@@ -17,7 +17,7 @@ import (

func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling NetworkPolicy")
log.V(1).Info("Reconciling NetworkPolicy")

var cidrList []string

@@ -46,13 +46,18 @@ func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Cont

// if disabled then delete the existing network policy
if policy.Spec.DisableNetworkPolicy {
err := c.Client.Delete(ctx, networkPolicy)
return client.IgnoreNotFound(err)
log.V(1).Info("Deleting NetworkPolicy")

return client.IgnoreNotFound(c.Client.Delete(ctx, networkPolicy))
}

log.V(1).Info("Creating NetworkPolicy")

// otherwise try to create/update
err := c.Client.Create(ctx, networkPolicy)
if apierrors.IsAlreadyExists(err) {
log.V(1).Info("NetworkPolicy already exists, updating.")

return c.Client.Update(ctx, networkPolicy)
}
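The create-then-fall-back-to-update sequence above is a common idempotency pattern for controller-managed objects, and the same shape recurs below for ResourceQuota and LimitRange. A generic sketch of the pattern with the controller-runtime client:

```go
package ensure

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createOrUpdate tries a Create first and falls back to Update when the
// object already exists, mirroring the NetworkPolicy handling above.
// Note: unlike controllerutil.CreateOrUpdate, this overwrites the live
// object wholesale rather than mutating a fetched copy.
func createOrUpdate(ctx context.Context, c client.Client, obj client.Object) error {
	err := c.Create(ctx, obj)
	if apierrors.IsAlreadyExists(err) {
		return c.Update(ctx, obj)
	}

	return err
}
```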
@@ -248,7 +248,7 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {

func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling VirtualClusterPolicy")
log.Info("Reconciling VirtualClusterPolicy")

var policy v1beta1.VirtualClusterPolicy
if err := c.Client.Get(ctx, req.NamespacedName, &policy); err != nil {

@@ -261,6 +261,8 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco

// update Status if needed
if !reflect.DeepEqual(orig.Status, policy.Status) {
log.Info("Updating VirtualClusterPolicy Status")

if err := c.Client.Status().Update(ctx, &policy); err != nil {
return reconcile.Result{}, err
}

@@ -273,6 +275,8 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco

// update VirtualClusterPolicy if needed
if !reflect.DeepEqual(orig, policy) {
log.Info("Updating VirtualClusterPolicy")

if err := c.Client.Update(ctx, &policy); err != nil {
return reconcile.Result{}, err
}

@@ -295,7 +299,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx conte

func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context.Context, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling matching Namespaces")
log.V(1).Info("Reconciling matching Namespaces")

listOpts := client.MatchingLabels{
PolicyNameLabelKey: policy.Name,

@@ -307,8 +311,10 @@ func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context

}

for _, ns := range namespaces.Items {
ctx = ctrl.LoggerInto(ctx, log.WithValues("namespace", ns.Name))
log.Info("reconciling Namespace")
log = log.WithValues("namespace", ns.Name)
ctx = ctrl.LoggerInto(ctx, log)

log.V(1).Info("Reconciling Namespace")

orig := ns.DeepCopy()

@@ -331,6 +337,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context

c.reconcileNamespacePodSecurityLabels(ctx, &ns, policy)

if !reflect.DeepEqual(orig, &ns) {
log.Info("Updating Namespace")

if err := c.Client.Update(ctx, &ns); err != nil {
return err
}

@@ -342,7 +350,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context

func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling ResourceQuota")
log.V(1).Info("Reconciling ResourceQuota")

if policy.Spec.Quota == nil {
// check if resourceQuota object exists and deletes it.

@@ -357,6 +365,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, nam

return client.IgnoreNotFound(err)
}

log.V(1).Info("Deleting ResourceQuota")

return c.Client.Delete(ctx, &toDeleteResourceQuota)
}

@@ -381,8 +391,12 @@ func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, nam

return err
}

log.V(1).Info("Creating ResourceQuota")

err := c.Client.Create(ctx, resourceQuota)
if apierrors.IsAlreadyExists(err) {
log.V(1).Info("ResourceQuota already exists, updating.")

return c.Client.Update(ctx, resourceQuota)
}

@@ -391,7 +405,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, nam

func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling LimitRange")
log.V(1).Info("Reconciling LimitRange")

// delete limitrange if spec.limits isn't specified.
if policy.Spec.Limit == nil {

@@ -406,6 +420,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, nam

return client.IgnoreNotFound(err)
}

log.V(1).Info("Deleting LimitRange")

return c.Client.Delete(ctx, &toDeleteLimitRange)
}

@@ -429,8 +445,12 @@ func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, nam

return err
}

log.V(1).Info("Creating LimitRange")

err := c.Client.Create(ctx, limitRange)
if apierrors.IsAlreadyExists(err) {
log.V(1).Info("LimitRange already exists, updating.")

return c.Client.Update(ctx, limitRange)
}

@@ -439,7 +459,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, nam

func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, namespace *v1.Namespace, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling Clusters")
log.V(1).Info("Reconciling Clusters")

var clusters v1beta1.ClusterList
if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace.Name)); err != nil {

@@ -455,6 +475,8 @@ func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context,

cluster.Spec.NodeSelector = policy.Spec.DefaultNodeSelector

if !reflect.DeepEqual(orig, cluster) {
log.V(1).Info("Updating Cluster", "cluster", cluster.Name, "namespace", namespace.Name)

// continue updating the other clusters even if an error occurred
clusterUpdateErrs = append(clusterUpdateErrs, c.Client.Update(ctx, &cluster))
}
@@ -27,7 +27,7 @@ func newEncoder(format string) zapcore.Encoder {

encCfg.EncodeTime = zapcore.ISO8601TimeEncoder

var encoder zapcore.Encoder
if format == "console" {
if format == "text" {
encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
encoder = zapcore.NewConsoleEncoder(encCfg)
} else {
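This hunk renames the human-readable log format from "console" to "text" while keeping zap's console encoder underneath. A self-contained sketch of the same selection logic (the encoder config and the JSON fallback branch are assumptions reconstructed around the visible lines):

```go
package logger

import "go.uber.org/zap/zapcore"

// newEncoderSketch mirrors the selection above: "text" yields a colorized
// console encoder, anything else falls back to JSON (assumed default).
func newEncoderSketch(format string) zapcore.Encoder {
	encCfg := zapcore.EncoderConfig{
		TimeKey:     "time",
		LevelKey:    "level",
		MessageKey:  "msg",
		EncodeTime:  zapcore.ISO8601TimeEncoder,
		EncodeLevel: zapcore.CapitalLevelEncoder,
	}

	if format == "text" {
		encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
		return zapcore.NewConsoleEncoder(encCfg)
	}

	return zapcore.NewJSONEncoder(encCfg)
}
```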
@@ -13,7 +13,7 @@ import (

. "github.com/onsi/gomega"
)

var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), func() {
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), Label(certificatesTestsLabel), func() {
var virtualCluster *VirtualCluster

BeforeEach(func() {

@@ -56,7 +56,7 @@ var _ = When("a cluster with custom certificates is installed with individual ce

cluster := NewCluster(namespace.Name)

cluster.Spec.CustomCAs = v1beta1.CustomCAs{
cluster.Spec.CustomCAs = &v1beta1.CustomCAs{
Enabled: true,
Sources: v1beta1.CredentialSources{
ServerCA: v1beta1.CredentialSource{

@@ -5,7 +5,7 @@ import (

. "github.com/onsi/gomega"
)

var _ = When("two virtual clusters are installed", Label("e2e"), func() {
var _ = When("two virtual clusters are installed", Label("e2e"), Label(networkingTestsLabel), func() {
var (
cluster1 *VirtualCluster
cluster2 *VirtualCluster

@@ -20,15 +20,15 @@ import (

. "github.com/onsi/gomega"
)

var _ = When("an ephemeral cluster is installed", Label("e2e"), func() {
var _ = When("an ephemeral cluster is installed", Label("e2e"), Label(persistenceTestsLabel), func() {
var virtualCluster *VirtualCluster

BeforeEach(func() {
virtualCluster = NewVirtualCluster()
})

AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
})

It("can create a nginx pod", func() {

@@ -111,7 +111,7 @@ var _ = When("an ephemeral cluster is installed", Label("e2e"), func() {

})
})

var _ = When("a dynamic cluster is installed", Label("e2e"), func() {
var _ = When("a dynamic cluster is installed", Label("e2e"), Label(persistenceTestsLabel), func() {
var virtualCluster *VirtualCluster

BeforeEach(func() {

@@ -18,7 +18,7 @@ import (

. "github.com/onsi/gomega"
)

var _ = When("a cluster's status is tracked", Label("e2e"), func() {
var _ = When("a cluster's status is tracked", Label("e2e"), Label(statusTestsLabel), func() {
var (
namespace *corev1.Namespace
vcp *v1beta1.VirtualClusterPolicy
145
tests/cluster_sync_test.go
Normal file
@@ -0,0 +1,145 @@
package k3k_test

import (
"context"
"time"

corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/rancher/k3k/k3k-kubelet/translate"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

var _ = When("a shared mode cluster is created", Ordered, Label("e2e"), func() {
var (
virtualCluster *VirtualCluster
virtualConfigMap *corev1.ConfigMap
virtualService *corev1.Service
)

BeforeAll(func() {
virtualCluster = NewVirtualCluster()

DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
})

When("a ConfigMap is created in the virtual cluster", func() {
BeforeAll(func() {
ctx := context.Background()

virtualConfigMap = &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cm",
Namespace: "default",
},
}

var err error

virtualConfigMap, err = virtualCluster.Client.CoreV1().ConfigMaps("default").Create(ctx, virtualConfigMap, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})

It("is replicated in the host cluster", func() {
ctx := context.Background()

hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
namespacedName := hostTranslator.NamespacedName(virtualConfigMap)

// check that the ConfigMap is synced in the host cluster
Eventually(func(g Gomega) {
_, err := k8s.CoreV1().ConfigMaps(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})
})

When("a Service is created in the virtual cluster", func() {
BeforeAll(func() {
ctx := context.Background()

virtualService = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test-svc",
Namespace: "default",
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Ports: []corev1.ServicePort{{Port: 8888}},
},
}

var err error
virtualService, err = virtualCluster.Client.CoreV1().Services("default").Create(ctx, virtualService, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})

It("is replicated in the host cluster", func() {
ctx := context.Background()

hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
namespacedName := hostTranslator.NamespacedName(virtualService)
// check that the Service is synced in the host cluster
Eventually(func(g Gomega) {
_, err := k8s.CoreV1().Services(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{})
g.Expect(err).To(Not(HaveOccurred()))
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})
})

When("the cluster is deleted", func() {
BeforeAll(func() {
ctx := context.Background()

By("Deleting cluster")

err := k8sClient.Delete(ctx, virtualCluster.Cluster)
Expect(err).To(Not(HaveOccurred()))
})

It("will delete the ConfigMap from the host cluster", func() {
ctx := context.Background()

hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
namespacedName := hostTranslator.NamespacedName(virtualConfigMap)

// check that the ConfigMap is deleted from the host cluster
Eventually(func(g Gomega) {
_, err := k8s.CoreV1().ConfigMaps(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{})
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})

It("will delete the Service from the host cluster", func() {
ctx := context.Background()

hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
namespacedName := hostTranslator.NamespacedName(virtualService)

// check that the Service is deleted from the host cluster
Eventually(func(g Gomega) {
_, err := k8s.CoreV1().Services(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{})
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}).
WithTimeout(time.Minute).
WithPolling(time.Second).
Should(Succeed())
})
})
})
@@ -18,12 +18,16 @@ import (

. "github.com/onsi/gomega"
)

var _ = When("a shared mode cluster update its envs", Label("e2e"), func() {
var _ = When("a shared mode cluster update its envs", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()

DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})

cluster := NewCluster(namespace.Name)

// Add initial environment variables for server

@@ -152,12 +156,16 @@ var _ = When("a shared mode cluster update its envs", Label("e2e"), func() {

})
})

var _ = When("a shared mode cluster update its server args", Label("e2e"), func() {
var _ = When("a shared mode cluster update its server args", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()

DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})

cluster := NewCluster(namespace.Name)

// Add initial args for server

@@ -207,12 +215,16 @@ var _ = When("a shared mode cluster update its server args", Label("e2e"), func(

})
})

var _ = When("a virtual mode cluster update its envs", Label("e2e"), func() {
var _ = When("a virtual mode cluster update its envs", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()

DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})

cluster := NewCluster(namespace.Name)

// Add initial environment variables for server

@@ -239,7 +251,7 @@ var _ = When("a virtual mode cluster update its envs", Label("e2e"), func() {

}

cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To(int32(1))
cluster.Spec.Agents = ptr.To[int32](1)

CreateCluster(cluster)

@@ -344,7 +356,7 @@ var _ = When("a virtual mode cluster update its envs", Label("e2e"), func() {

})
})

var _ = When("a virtual mode cluster update its server args", Label("e2e"), func() {
var _ = When("a virtual mode cluster update its server args", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {

@@ -358,7 +370,7 @@ var _ = When("a virtual mode cluster update its server args", Label("e2e"), func

}

cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To(int32(1))
cluster.Spec.Agents = ptr.To[int32](1)

CreateCluster(cluster)

@@ -402,7 +414,7 @@ var _ = When("a virtual mode cluster update its server args", Label("e2e"), func

})
})

var _ = When("a shared mode cluster update its version", Label("e2e"), func() {
var _ = When("a shared mode cluster update its version", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod

@@ -411,6 +423,10 @@ var _ = When("a shared mode cluster update its version", Label("e2e"), func() {

ctx := context.Background()
namespace := NewNamespace()

DeferCleanup(func() {
DeleteNamespaces(namespace.Name)
})

cluster := NewCluster(namespace.Name)

// Add initial version

@@ -437,7 +453,12 @@ var _ = When("a shared mode cluster update its version", Label("e2e"), func() {

Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))

nginxPod, _ = virtualCluster.NewNginxPod("")

DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
})

It("will update server version when version spec is updated", func() {
var cluster v1beta1.Cluster
ctx := context.Background()

@@ -457,8 +478,7 @@ var _ = When("a shared mode cluster update its version", Label("e2e"), func() {

g.Expect(len(serverPods)).To(Equal(1))

serverPod := serverPods[0]
condIndex, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(condIndex).NotTo(Equal(-1))
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))

@@ -468,20 +488,19 @@ var _ = When("a shared mode cluster update its version", Label("e2e"), func() {

g.Expect(err).To(BeNil())
g.Expect(clusterVersion.String()).To(Equal(strings.ReplaceAll(cluster.Spec.Version, "-", "+")))

_, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
condIndex, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(condIndex).NotTo(Equal(-1))
_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second * 2).
WithPolling(time.Second * 5).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})
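These hunks switch the readiness assertions from checking the index returned by GetPodCondition to checking the condition pointer itself, which reads more directly since the helper returns (-1, nil) when the condition is absent. A minimal sketch of the check (the import path for the helper is an assumption; in k3k's tests it comes in as "pod"):

```go
package podcheck

import (
	v1 "k8s.io/api/core/v1"
	pod "k8s.io/kubernetes/pkg/api/v1/pod"
)

// isPodReady reports whether the PodReady condition exists and is True.
// Checking cond != nil is equivalent to checking the index != -1.
func isPodReady(p *v1.Pod) bool {
	_, cond := pod.GetPodCondition(&p.Status, v1.PodReady)

	return cond != nil && cond.Status == v1.ConditionTrue
}
```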
var _ = When("a virtual mode cluster update its version", Label("e2e"), func() {
|
||||
var _ = When("a virtual mode cluster update its version", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
nginxPod *v1.Pod
|
||||
@@ -490,13 +509,17 @@ var _ = When("a virtual mode cluster update its version", Label("e2e"), func() {
|
||||
ctx := context.Background()
|
||||
namespace := NewNamespace()
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespace.Name)
|
||||
})
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
// Add initial version
|
||||
cluster.Spec.Version = "v1.31.13-k3s1"
|
||||
|
||||
cluster.Spec.Mode = v1beta1.VirtualClusterMode
|
||||
cluster.Spec.Agents = ptr.To(int32(1))
|
||||
cluster.Spec.Agents = ptr.To[int32](1)
|
||||
|
||||
// need to enable persistence for this
|
||||
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
|
||||
@@ -544,8 +567,7 @@ var _ = When("a virtual mode cluster update its version", Label("e2e"), func() {
|
||||
g.Expect(len(serverPods)).To(Equal(1))
|
||||
|
||||
serverPod := serverPods[0]
|
||||
condIndex, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
|
||||
g.Expect(condIndex).NotTo(Equal(-1))
|
||||
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
|
||||
@@ -556,8 +578,7 @@ var _ = When("a virtual mode cluster update its version", Label("e2e"), func() {
|
||||
g.Expect(len(agentPods)).To(Equal(1))
|
||||
|
||||
agentPod := agentPods[0]
|
||||
condIndex, cond = pod.GetPodCondition(&agentPod.Status, v1.PodReady)
|
||||
g.Expect(condIndex).NotTo(Equal(-1))
|
||||
_, cond = pod.GetPodCondition(&agentPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
|
||||
@@ -570,8 +591,7 @@ var _ = When("a virtual mode cluster update its version", Label("e2e"), func() {
|
||||
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
|
||||
g.Expect(err).To(BeNil())
|
||||
|
||||
condIndex, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
|
||||
g.Expect(condIndex).NotTo(Equal(-1))
|
||||
_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
}).
|
||||
@@ -580,3 +600,381 @@ var _ = When("a virtual mode cluster update its version", Label("e2e"), func() {
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a shared mode cluster scales up servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
nginxPod *v1.Pod
|
||||
)
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
namespace := NewNamespace()
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespace.Name)
|
||||
})
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
// need to enable persistence for this
|
||||
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
|
||||
Type: v1beta1.DynamicPersistenceMode,
|
||||
}
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
sPods := listServerPods(ctx, virtualCluster)
|
||||
Expect(len(sPods)).To(Equal(1))
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
// since there is no way to check nodes in shared mode
|
||||
// we can check if the endpoints are registered to N nodes
|
||||
k8sEndpointSlices, err := virtualCluster.Client.DiscoveryV1().EndpointSlices("default").Get(ctx, "kubernetes", metav1.GetOptions{})
|
||||
g.Expect(err).ToNot(HaveOccurred())
|
||||
g.Expect(len(k8sEndpointSlices.Endpoints)).To(Equal(1))
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
|
||||
nginxPod, _ = virtualCluster.NewNginxPod("")
|
||||
})
|
||||
It("will scale up server pods", func() {
|
||||
var cluster v1beta1.Cluster
|
||||
ctx := context.Background()
|
||||
|
||||
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// scale cluster servers to 3 nodes
|
||||
cluster.Spec.Servers = ptr.To[int32](3)
|
||||
|
||||
err = k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
// server pods
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
g.Expect(len(serverPods)).To(Equal(3))
|
||||
|
||||
for _, serverPod := range serverPods {
|
||||
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
}
|
||||
|
||||
k8sEndpointSlices, err := virtualCluster.Client.DiscoveryV1().EndpointSlices("default").Get(ctx, "kubernetes", metav1.GetOptions{})
|
||||
g.Expect(err).ToNot(HaveOccurred())
|
||||
g.Expect(len(k8sEndpointSlices.Endpoints)).To(Equal(3))
|
||||
|
||||
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
|
||||
g.Expect(err).To(BeNil())
|
||||
_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a shared mode cluster scales down servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
nginxPod *v1.Pod
|
||||
)
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
namespace := NewNamespace()
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespace.Name)
|
||||
})
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
// start cluster with 3 servers
|
||||
cluster.Spec.Servers = ptr.To[int32](3)
|
||||
|
||||
// need to enable persistence for this
|
||||
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
|
||||
Type: v1beta1.DynamicPersistenceMode,
|
||||
}
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
// no need to check servers status since createCluster() will wait until all servers are in ready state
|
||||
sPods := listServerPods(ctx, virtualCluster)
|
||||
Expect(len(sPods)).To(Equal(3))
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
// since there is no way to check nodes in shared mode
|
||||
// we can check if the endpoints are registered to N nodes
|
||||
k8sEndpointSlices, err := virtualCluster.Client.DiscoveryV1().EndpointSlices("default").Get(ctx, "kubernetes", metav1.GetOptions{})
|
||||
g.Expect(err).ToNot(HaveOccurred())
|
||||
g.Expect(len(k8sEndpointSlices.Endpoints)).To(Equal(3))
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
|
||||
nginxPod, _ = virtualCluster.NewNginxPod("")
|
||||
})
|
||||
It("will scale down server pods", func() {
|
||||
var cluster v1beta1.Cluster
|
||||
ctx := context.Background()
|
||||
|
||||
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// scale down cluster servers to 1 node
|
||||
cluster.Spec.Servers = ptr.To[int32](1)
|
||||
|
||||
err = k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
// server pods
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
g.Expect(len(serverPods)).To(Equal(1))
|
||||
|
||||
_, cond := pod.GetPodCondition(&serverPods[0].Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
|
||||
k8sEndpointSlices, err := virtualCluster.Client.DiscoveryV1().EndpointSlices("default").Get(ctx, "kubernetes", metav1.GetOptions{})
|
||||
g.Expect(err).ToNot(HaveOccurred())
|
||||
g.Expect(len(k8sEndpointSlices.Endpoints)).To(Equal(1))
|
||||
|
||||
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
|
||||
g.Expect(err).To(BeNil())
|
||||
_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a virtual mode cluster scales up servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
nginxPod *v1.Pod
|
||||
)
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
namespace := NewNamespace()
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespace.Name)
|
||||
})
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
cluster.Spec.Mode = v1beta1.VirtualClusterMode
|
||||
|
||||
// need to enable persistence for this
|
||||
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
|
||||
Type: v1beta1.DynamicPersistenceMode,
|
||||
}
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
sPods := listServerPods(ctx, virtualCluster)
|
||||
Expect(len(sPods)).To(Equal(1))
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := virtualCluster.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(len(nodes.Items)).To(Equal(1))
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
WithTimeout(time.Minute * 5).
|
||||
Should(Succeed())
|
||||
|
||||
nginxPod, _ = virtualCluster.NewNginxPod("")
|
||||
})
|
||||
It("will scale up server pods", func() {
|
||||
var cluster v1beta1.Cluster
|
||||
ctx := context.Background()
|
||||
|
||||
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// scale cluster servers to 3 nodes
|
||||
cluster.Spec.Servers = ptr.To[int32](3)
|
||||
|
||||
err = k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
// server pods
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
g.Expect(len(serverPods)).To(Equal(3))
|
||||
|
||||
for _, serverPod := range serverPods {
|
||||
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
}
|
||||
|
||||
nodes, err := virtualCluster.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(len(nodes.Items)).To(Equal(3))
|
||||
|
||||
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
|
||||
g.Expect(err).To(BeNil())
|
||||
_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
WithTimeout(time.Minute * 5).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
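The scale-up above is just an update to `.spec.servers` on the Cluster object; the controller reconciles the server pods from there. A hedged sketch of the same operation as a standalone helper (assuming a controller-runtime client with the v1beta1 scheme registered; the function name is illustrative):

    import (
        "context"

        "k8s.io/utils/ptr"
        ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

        "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
    )

    // scaleServers is a hypothetical helper mirroring the test above: fetch the
    // Cluster, set the desired server count, and push the update.
    func scaleServers(ctx context.Context, c ctrlruntimeclient.Client, key ctrlruntimeclient.ObjectKey, servers int32) error {
        var cluster v1beta1.Cluster
        if err := c.Get(ctx, key, &cluster); err != nil {
            return err
        }

        cluster.Spec.Servers = ptr.To(servers)

        return c.Update(ctx, &cluster)
    }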
var _ = When("a virtual mode cluster scales down servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
nginxPod *v1.Pod
|
||||
)
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
namespace := NewNamespace()
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespace.Name)
|
||||
})
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
cluster.Spec.Mode = v1beta1.VirtualClusterMode
|
||||
|
||||
// start cluster with 3 servers
|
||||
cluster.Spec.Servers = ptr.To[int32](3)
|
||||
|
||||
// need to enable persistence for this
|
||||
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
|
||||
Type: v1beta1.DynamicPersistenceMode,
|
||||
}
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
// no need to check servers status since createCluster() will wait until all servers are in ready state
|
||||
sPods := listServerPods(ctx, virtualCluster)
|
||||
Expect(len(sPods)).To(Equal(3))
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
nodes, err := virtualCluster.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(len(nodes.Items)).To(Equal(3))
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
WithTimeout(time.Minute * 5).
|
||||
Should(Succeed())
|
||||
|
||||
nginxPod, _ = virtualCluster.NewNginxPod("")
|
||||
})
|
||||
|
||||
It("will scale down server pods", func() {
|
||||
By("Scaling down cluster")
|
||||
|
||||
var cluster v1beta1.Cluster
|
||||
ctx := context.Background()
|
||||
|
||||
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// scale down cluster servers to 1 node
|
||||
cluster.Spec.Servers = ptr.To[int32](1)
|
||||
|
||||
err = k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
|
||||
// Wait for all the server pods to be marked for deletion
|
||||
for _, serverPod := range serverPods {
|
||||
g.Expect(serverPod.DeletionTimestamp).NotTo(BeNil())
|
||||
}
|
||||
}).
|
||||
MustPassRepeatedly(5).
|
||||
WithPolling(time.Second * 5).
|
||||
WithTimeout(time.Minute * 3).
|
||||
Should(Succeed())
|
||||
|
||||
By("Waiting for cluster to be ready again")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
// server pods
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
g.Expect(len(serverPods)).To(Equal(1))
|
||||
|
||||
_, cond := pod.GetPodCondition(&serverPods[0].Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
|
||||
// we can't check for number of nodes in scale down because the nodes will be there but in a non-ready state
|
||||
k8sEndpointSlices, err := virtualCluster.Client.DiscoveryV1().EndpointSlices("default").Get(ctx, "kubernetes", metav1.GetOptions{})
|
||||
g.Expect(err).ToNot(HaveOccurred())
|
||||
g.Expect(len(k8sEndpointSlices.Endpoints)).To(Equal(1))
|
||||
}).
|
||||
MustPassRepeatedly(5).
|
||||
WithPolling(time.Second * 5).
|
||||
WithTimeout(time.Minute * 2).
|
||||
Should(Succeed())
|
||||
|
||||
By("Checking that Nginx Pod is Running")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
|
||||
g.Expect(err).To(BeNil())
|
||||
|
||||
// TODO: there is a possible issue where the Pod is not being marked as Ready
|
||||
// if the kubelet lost the sync with the API server.
|
||||
// We check for the ContainersReady status (all containers in the pod are ready),
|
||||
// but this is probably to investigate.
|
||||
// Related issue (?): https://github.com/kubernetes/kubernetes/issues/82346
|
||||
|
||||
_, cond := pod.GetPodCondition(&nginxPod.Status, v1.ContainersReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
WithTimeout(time.Minute).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
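On the TODO above: PodReady is the kubelet-aggregated condition (all containers ready plus any readiness gates), while ContainersReady only reflects the containers themselves, which is why the test falls back to it after a control-plane restart. A small hedged helper showing both lookups with plain corev1 types (no test framework; names are illustrative):

    import corev1 "k8s.io/api/core/v1"

    // findCondition returns the pod condition of the given type, or nil if absent.
    func findCondition(p *corev1.Pod, t corev1.PodConditionType) *corev1.PodCondition {
        for i := range p.Status.Conditions {
            if p.Status.Conditions[i].Type == t {
                return &p.Status.Conditions[i]
            }
        }

        return nil
    }

    // containersReady reports whether all containers are ready, even when the
    // aggregate PodReady condition has not been refreshed yet.
    func containersReady(p *corev1.Pod) bool {
        c := findCondition(p, corev1.ContainersReady)
        return c != nil && c.Status == corev1.ConditionTrue
    }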
@@ -14,10 +14,11 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/tools/remotecommand"
 	"k8s.io/kubectl/pkg/scheme"
 	"k8s.io/kubernetes/pkg/api/v1/pod"
+	"k8s.io/utils/ptr"
 
-	corev1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 
 	"github.com/rancher/k3k/k3k-kubelet/translate"
@@ -87,11 +88,11 @@ func NewVirtualClusters(n int) []*VirtualCluster {
 	return clusters
 }
 
-func NewNamespace() *corev1.Namespace {
+func NewNamespace() *v1.Namespace {
 	GinkgoHelper()
 
-	namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-", Labels: map[string]string{"e2e": "true"}}}
-	namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, v1.CreateOptions{})
+	namespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-", Labels: map[string]string{"e2e": "true"}}}
+	namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, metav1.CreateOptions{})
 	Expect(err).To(Not(HaveOccurred()))
 
 	return namespace
@@ -120,7 +121,7 @@ func deleteNamespace(name string) {
 
 	By(fmt.Sprintf("Deleting namespace %s", name))
 
-	err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, v1.DeleteOptions{
+	err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{
 		GracePeriodSeconds: ptr.To[int64](0),
 	})
 	Expect(err).To(Not(HaveOccurred()))
@@ -128,7 +129,7 @@ func deleteNamespace(name string) {
 
 func NewCluster(namespace string) *v1beta1.Cluster {
 	return &v1beta1.Cluster{
-		ObjectMeta: v1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "cluster-",
 			Namespace:    namespace,
 		},
@@ -154,36 +155,43 @@ func CreateCluster(cluster *v1beta1.Cluster) {
 	err := k8sClient.Create(ctx, cluster)
 	Expect(err).To(Not(HaveOccurred()))
 
-	By("Waiting for cluster to be ready")
+	expectedServers := int(*cluster.Spec.Servers)
+	expectedAgents := int(*cluster.Spec.Agents)
+
+	By(fmt.Sprintf("Waiting for cluster to be ready. Expected servers: %d. Expected agents: %d", expectedServers, expectedAgents))
+
+	// track the Eventually status to log on changes
+	prev := -1
 
 	// check that the server Pod and the Kubelet are in Ready state
 	Eventually(func() bool {
-		podList, err := k8s.CoreV1().Pods(cluster.Namespace).List(ctx, v1.ListOptions{})
+		podList, err := k8s.CoreV1().Pods(cluster.Namespace).List(ctx, metav1.ListOptions{})
 		Expect(err).To(Not(HaveOccurred()))
 
 		// all the servers and agents need to be in a running phase
 		var serversReady, agentsReady int
 
-		for _, pod := range podList.Items {
-			if pod.Labels["role"] == "server" {
-				GinkgoLogr.Info(fmt.Sprintf("server pod=%s/%s status=%s", pod.Namespace, pod.Name, pod.Status.Phase))
-				if pod.Status.Phase == corev1.PodRunning {
-					serversReady++
-				}
+		for _, k3sPod := range podList.Items {
+			_, cond := pod.GetPodCondition(&k3sPod.Status, v1.PodReady)
+
+			// pod not ready
+			if cond == nil || cond.Status != v1.ConditionTrue {
+				continue
 			}
 
-			if pod.Labels["type"] == "agent" {
-				GinkgoLogr.Info(fmt.Sprintf("agent pod=%s/%s status=%s", pod.Namespace, pod.Name, pod.Status.Phase))
-				if pod.Status.Phase == corev1.PodRunning {
-					agentsReady++
-				}
+			if k3sPod.Labels["role"] == "server" {
+				serversReady++
+			}
+
+			if k3sPod.Labels["type"] == "agent" {
+				agentsReady++
 			}
 		}
 
-		expectedServers := int(*cluster.Spec.Servers)
-		expectedAgents := int(*cluster.Spec.Agents)
-
-		By(fmt.Sprintf("serversReady=%d/%d agentsReady=%d/%d", serversReady, expectedServers, agentsReady, expectedAgents))
+		if prev != (serversReady + agentsReady) {
+			GinkgoLogr.Info("Waiting for pods to be Ready", "servers", serversReady, "agents", agentsReady, "time", time.Now().Format(time.DateTime))
+			prev = (serversReady + agentsReady)
+		}
 
 		// the ready servers should equal the expected servers; in shared mode the kubelet also matches as an agent, so more agents than expected is fine
 		if (serversReady != expectedServers) || (agentsReady < expectedAgents) {
@@ -193,8 +201,10 @@ func CreateCluster(cluster *v1beta1.Cluster) {
 		return true
 	}).
 		WithTimeout(time.Minute * 5).
-		WithPolling(time.Second * 5).
+		WithPolling(time.Second * 10).
 		Should(BeTrue())
 
+	By("Cluster is ready")
 }
 
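The hunks above also touch the two polling styles Gomega offers: `Eventually(func() bool)` can only report pass/fail on timeout, while `Eventually(func(g Gomega))` with `Should(Succeed())` reports exactly which inner assertion is still failing. A minimal side-by-side sketch (the `ready` predicate is a placeholder, not part of the suite):

    // placeholder predicate standing in for any real readiness check
    ready := func() bool { return true }

    // boolean style: on timeout the failure message is just "expected true"
    Eventually(func() bool {
        return ready()
    }).WithTimeout(time.Minute).WithPolling(time.Second).Should(BeTrue())

    // Gomega-parameter style: the failing inner assertion is reported verbatim
    Eventually(func(g Gomega) {
        g.Expect(ready()).To(BeTrue())
    }).WithTimeout(time.Minute).WithPolling(time.Second).Should(Succeed())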
 // NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
@@ -236,41 +246,60 @@ func NewVirtualK8sClientAndConfig(cluster *v1beta1.Cluster) (*kubernetes.Clients
 	return virtualK8sClient, restcfg
 }
 
-func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
+func (c *VirtualCluster) NewNginxPod(namespace string) (*v1.Pod, string) {
 	GinkgoHelper()
 
 	if namespace == "" {
 		namespace = "default"
 	}
 
-	nginxPod := &corev1.Pod{
-		ObjectMeta: v1.ObjectMeta{
+	nginxPod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "nginx-",
 			Namespace:    namespace,
 		},
-		Spec: corev1.PodSpec{
-			Containers: []corev1.Container{{
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{{
 				Name:  "nginx",
 				Image: "nginx",
 			}},
 		},
 	}
 
-	By("Creating Pod")
+	By("Creating Nginx Pod and waiting for it to be Ready")
 
 	ctx := context.Background()
-	nginxPod, err := c.Client.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, v1.CreateOptions{})
+
+	var err error
+
+	nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Create(ctx, nginxPod, metav1.CreateOptions{})
 	Expect(err).To(Not(HaveOccurred()))
 
-	var podIP string
+	// check that the nginx Pod is up and running in the virtual cluster
+	Eventually(func(g Gomega) {
+		nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
+		g.Expect(err).To(Not(HaveOccurred()))
+
+		_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
+		g.Expect(cond).NotTo(BeNil())
+		g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
+	}).
+		WithTimeout(time.Minute).
+		WithPolling(time.Second).
+		Should(Succeed())
+
+	By(fmt.Sprintf("Nginx Pod is running (%s/%s)", nginxPod.Namespace, nginxPod.Name))
+
+	// only check the pod on the host cluster if the mode is shared mode
+	if c.Cluster.Spec.Mode != v1beta1.SharedClusterMode {
+		return nginxPod, ""
+	}
+
+	var podIP string
 
 	// check that the nginx Pod is up and running in the host cluster
 	Eventually(func() bool {
-		podList, err := k8s.CoreV1().Pods(c.Cluster.Namespace).List(ctx, v1.ListOptions{})
+		podList, err := k8s.CoreV1().Pods(c.Cluster.Namespace).List(ctx, metav1.ListOptions{})
 		Expect(err).To(Not(HaveOccurred()))
 
 		for _, pod := range podList.Items {
@@ -285,7 +314,7 @@ func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
 				pod.Name, resourceNamespace, resourceName, pod.Status.Phase, podIP,
 			)
 
-			return pod.Status.Phase == corev1.PodRunning && podIP != ""
+			return pod.Status.Phase == v1.PodRunning && podIP != ""
 		}
 	}
 
@@ -295,16 +324,12 @@ func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
 		WithPolling(time.Second * 5).
 		Should(BeTrue())
 
-	// get the running pod from the virtual cluster
-	nginxPod, err = c.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, v1.GetOptions{})
-	Expect(err).To(Not(HaveOccurred()))
-
 	return nginxPod, podIP
 }
 
 // ExecCmd executes a command on a specific pod and waits for the command's output.
-func (c *VirtualCluster) ExecCmd(pod *corev1.Pod, command string) (string, string, error) {
-	option := &corev1.PodExecOptions{
+func (c *VirtualCluster) ExecCmd(pod *v1.Pod, command string) (string, string, error) {
+	option := &v1.PodExecOptions{
 		Command: []string{"sh", "-c", command},
 		Stdout:  true,
 		Stderr:  true,
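For reference, a hedged usage sketch of ExecCmd as defined above (assuming a populated VirtualCluster and a running pod; the command is arbitrary):

    // Run a shell command inside the nginx pod and collect both streams.
    stdout, stderr, err := virtualCluster.ExecCmd(nginxPod, "hostname")
    Expect(err).To(Not(HaveOccurred()), stderr)
    GinkgoWriter.Printf("pod hostname: %s\n", stdout)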
@@ -336,7 +361,7 @@ func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
 	GinkgoHelper()
 
 	labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
-	serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
+	serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
 	Expect(err).To(Not(HaveOccurred()))
 
 	Expect(len(serverPods.Items)).To(Equal(1))
@@ -344,40 +369,40 @@ func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
 
 	GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
 
-	err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
+	err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, metav1.DeleteOptions{})
 	Expect(err).To(Not(HaveOccurred()))
 
 	By("Deleting server pod")
 
 	// check that the server pod restarted
 	Eventually(func() any {
-		serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
+		serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
 		Expect(err).To(Not(HaveOccurred()))
 		Expect(len(serverPods.Items)).To(Equal(1))
 		return serverPods.Items[0].DeletionTimestamp
 	}).WithTimeout(60 * time.Second).WithPolling(time.Second * 5).Should(BeNil())
 }
 
-func listServerPods(ctx context.Context, virtualCluster *VirtualCluster) []corev1.Pod {
+func listServerPods(ctx context.Context, virtualCluster *VirtualCluster) []v1.Pod {
 	labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
 
-	serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
+	serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
 	Expect(err).To(Not(HaveOccurred()))
 
 	return serverPods.Items
 }
 
-func listAgentPods(ctx context.Context, virtualCluster *VirtualCluster) []corev1.Pod {
+func listAgentPods(ctx context.Context, virtualCluster *VirtualCluster) []v1.Pod {
 	labelSelector := fmt.Sprintf("cluster=%s,type=agent,mode=%s", virtualCluster.Cluster.Name, virtualCluster.Cluster.Spec.Mode)
 
-	agentPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
+	agentPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
 	Expect(err).To(Not(HaveOccurred()))
 
 	return agentPods.Items
 }
 
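The two helpers above lean on the labels k3k stamps on its pods (`cluster`, `role`, `type`, `mode`). An equivalent raw list call, sketched for a hypothetical cluster named "mycluster" (`clusterNamespace` is an assumed variable):

    // Hypothetical: list the server pods of "mycluster" without the helpers.
    pods, err := k8s.CoreV1().Pods(clusterNamespace).List(ctx, metav1.ListOptions{
        LabelSelector: "cluster=mycluster,role=server",
    })
    Expect(err).To(Not(HaveOccurred()))
    GinkgoWriter.Printf("found %d server pods\n", len(pods.Items))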
 // getEnv gets an environment variable from a pod; it returns an empty string and false if it is not found
-func getEnv(pod *corev1.Pod, envName string) (string, bool) {
+func getEnv(pod *v1.Pod, envName string) (string, bool) {
 	container := pod.Spec.Containers[0]
 	for _, envVar := range container.Env {
 		if envVar.Name == envName {
@@ -389,7 +414,7 @@ func getEnv(pod *corev1.Pod, envName string) (string, bool) {
 }
 
 // isArgFound returns true if the argument passed to the function is found in the container command
-func isArgFound(pod *corev1.Pod, arg string) bool {
+func isArgFound(pod *v1.Pod, arg string) bool {
 	container := pod.Spec.Containers[0]
 	for _, cmd := range container.Command {
 		if strings.Contains(cmd, arg) {
 
@@ -26,10 +26,11 @@ import (
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/tools/remotecommand"
+	"k8s.io/kubernetes/pkg/api/v1/pod"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
-	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -41,6 +42,13 @@ import (
 const (
 	k3kNamespace = "k3k-system"
 	k3kName      = "k3k"
+
+	slowTestsLabel         = "slow"
+	updateTestsLabel       = "update"
+	persistenceTestsLabel  = "persistence"
+	networkingTestsLabel   = "networking"
+	statusTestsLabel       = "status"
+	certificatesTestsLabel = "certificates"
 )
 
 func TestTests(t *testing.T) {
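These label constants pair with Ginkgo v2's Label decorator, which the specs above use (e.g. Label(slowTestsLabel)); at run time a subset can be selected with ginkgo's --label-filter flag, for example --label-filter='e2e && !slow'. A minimal hedged example of a labeled spec:

    var _ = It("runs only when slow tests are selected", Label(slowTestsLabel), func() {
        // assertions go here
    })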
@@ -116,7 +124,7 @@ func initKubernetesClient(ctx context.Context) {
 func buildScheme() *runtime.Scheme {
 	scheme := runtime.NewScheme()
 
-	err := corev1.AddToScheme(scheme)
+	err := v1.AddToScheme(scheme)
 	Expect(err).NotTo(HaveOccurred())
 	err = v1beta1.AddToScheme(scheme)
 	Expect(err).NotTo(HaveOccurred())
@@ -215,25 +223,25 @@ func installK3kChart() {
 }
 
 func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
-	pvc := &corev1.PersistentVolumeClaim{
+	pvc := &v1.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "coverage-data-pvc",
 			Namespace: k3kNamespace,
 		},
-		Spec: corev1.PersistentVolumeClaimSpec{
-			AccessModes: []corev1.PersistentVolumeAccessMode{
-				corev1.ReadWriteOnce,
+		Spec: v1.PersistentVolumeClaimSpec{
+			AccessModes: []v1.PersistentVolumeAccessMode{
+				v1.ReadWriteOnce,
 			},
-			Resources: corev1.VolumeResourceRequirements{
-				Requests: corev1.ResourceList{
-					corev1.ResourceStorage: resource.MustParse("100M"),
+			Resources: v1.VolumeResourceRequirements{
+				Requests: v1.ResourceList{
+					v1.ResourceStorage: resource.MustParse("100M"),
 				},
 			},
 		},
 	}
 
 	_, err := clientset.CoreV1().PersistentVolumeClaims(k3kNamespace).Create(ctx, pvc, metav1.CreateOptions{})
-	Expect(err).To(Not(HaveOccurred()))
+	Expect(client.IgnoreAlreadyExists(err)).To(Not(HaveOccurred()))
 
 	patchData := []byte(`
 	{
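The swap to client.IgnoreAlreadyExists above makes patchPVC idempotent across suite re-runs: the helper returns nil when the error is an AlreadyExists API error and passes every other error through. The general pattern, sketched with a controller-runtime client (`c` and `obj` are assumed to exist):

    if err := c.Create(ctx, obj); client.IgnoreAlreadyExists(err) != nil {
        return err // a real failure; "already exists" is treated as success
    }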
@@ -340,9 +348,9 @@ var _ = AfterSuite(func() {
 // dumpK3kCoverageData will kill the K3k controller container to force it to dump the coverage data.
 // It will then download the files with kubectl cp into the specified folder. If the folder doesn't exist, it will be created.
 func dumpK3kCoverageData(ctx context.Context, folder string) {
-	GinkgoWriter.Println("Restarting k3k controller...")
+	By("Restarting k3k controller")
 
-	var podList corev1.PodList
+	var podList v1.PodList
 
 	err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
 	Expect(err).To(Not(HaveOccurred()))
@@ -353,26 +361,31 @@ func dumpK3kCoverageData(ctx context.Context, folder string) {
 	output, err := cmd.CombinedOutput()
 	Expect(err).NotTo(HaveOccurred(), string(output))
 
-	Eventually(func() corev1.PodPhase {
-		var pod corev1.Pod
+	By("Waiting to be ready again")
+
+	Eventually(func(g Gomega) {
 		key := types.NamespacedName{
 			Namespace: k3kNamespace,
 			Name:      k3kPod.Name,
 		}
-		err = k8sClient.Get(ctx, key, &pod)
-		Expect(err).To(Not(HaveOccurred()))
-
-		GinkgoWriter.Printf("K3k controller status: %s\n", pod.Status.Phase)
+		var controllerPod v1.Pod
 
-		return pod.Status.Phase
+		err = k8sClient.Get(ctx, key, &controllerPod)
+		g.Expect(err).To(Not(HaveOccurred()))
+
+		_, cond := pod.GetPodCondition(&controllerPod.Status, v1.PodReady)
+		g.Expect(cond).NotTo(BeNil())
+		g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
 	}).
+		MustPassRepeatedly(5).
 		WithPolling(time.Second * 2).
-		WithTimeout(time.Minute).
-		Should(Equal(corev1.PodRunning))
+		WithTimeout(time.Minute * 2).
+		Should(Succeed())
 
-	GinkgoWriter.Printf("Downloading covdata from k3k controller to %s\n", folder)
+	By("Controller is ready again, dumping coverage data")
+
+	GinkgoWriter.Printf("Downloading covdata from k3k controller %s/%s to %s\n", k3kNamespace, k3kPod.Name, folder)
 
 	cmd = exec.Command("kubectl", "cp", fmt.Sprintf("%s/%s:/tmp/covdata", k3kNamespace, k3kPod.Name), folder)
 	output, err = cmd.CombinedOutput()
@@ -380,13 +393,13 @@ func dumpK3kCoverageData(ctx context.Context, folder string) {
 }
 
 func getK3kLogs(ctx context.Context) io.ReadCloser {
-	var podList corev1.PodList
+	var podList v1.PodList
 
 	err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
 	Expect(err).To(Not(HaveOccurred()))
 
 	k3kPod := podList.Items[0]
-	req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &corev1.PodLogOptions{Previous: true})
+	req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &v1.PodLogOptions{Previous: true})
 	podLogs, err := req.Stream(ctx)
 	Expect(err).To(Not(HaveOccurred()))
 
@@ -427,13 +440,13 @@ func podExec(ctx context.Context, clientset *kubernetes.Clientset, config *rest.
 		SubResource("exec")
 	scheme := runtime.NewScheme()
 
-	if err := corev1.AddToScheme(scheme); err != nil {
+	if err := v1.AddToScheme(scheme); err != nil {
 		return nil, fmt.Errorf("error adding to scheme: %v", err)
 	}
 
 	parameterCodec := runtime.NewParameterCodec(scheme)
 
-	req.VersionedParams(&corev1.PodExecOptions{
+	req.VersionedParams(&v1.PodExecOptions{
 		Command: command,
 		Stdin:   stdin != nil,
 		Stdout:  stdout != nil,
@@ -461,8 +474,8 @@ func podExec(ctx context.Context, clientset *kubernetes.Clientset, config *rest.
 	return stderr.Bytes(), nil
 }
 
-func caCertSecret(name, namespace string, crt, key []byte) *corev1.Secret {
-	return &corev1.Secret{
+func caCertSecret(name, namespace string, crt, key []byte) *v1.Secret {
+	return &v1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
 			Namespace: namespace,