mirror of https://github.com/rancher/k3k.git — synced 2026-04-10 04:36:53 +00:00

Compare commits: chart-1.0. ... renovate/r (19 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 1d5943d054 | |
| | d9c504283e | |
| | 4738ab2c56 | |
| | 7c1292b262 | |
| | 3038276b02 | |
| | 6da59f16b9 | |
| | 118ed838a7 | |
| | fe924d3b89 | |
| | 831821206e | |
| | 16f7a71861 | |
| | b055aeff0f | |
| | 5ef99f83ee | |
| | f55dd45775 | |
| | f2badbfdb1 | |
| | e87d879a91 | |
| | 502f4389d2 | |
| | 1a16527750 | |
| | e7df4ed7f0 | |
| | 9fae02fcbf | |
.github/workflows/build.yml (vendored, 12 changes)

@@ -40,7 +40,7 @@ jobs:
        REGISTRY: ""

    - name: Run Trivy vulnerability scanner (k3kcli)
-     uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # 0.33.1
+     uses: aquasecurity/trivy-action@97e0b3872f55f89b95b2f65b3dbab56962816478 # 0.34.2
      with:
        ignore-unfixed: true
        severity: 'MEDIUM,HIGH,CRITICAL'
@@ -50,13 +50,13 @@ jobs:
        output: 'trivy-results-k3kcli.sarif'

    - name: Upload Trivy scan results to GitHub Security tab (k3kcli)
-     uses: github/codeql-action/upload-sarif@38e701f46e33fb233075bf4238cb1e5d68e429e4 # v3
+     uses: github/codeql-action/upload-sarif@5c8a8a642e79153f5d047b10ec1cba1d1cc65699 # v3
      with:
        sarif_file: trivy-results-k3kcli.sarif
        category: k3kcli

    - name: Run Trivy vulnerability scanner (k3k)
-     uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # 0.33.1
+     uses: aquasecurity/trivy-action@97e0b3872f55f89b95b2f65b3dbab56962816478 # 0.34.2
      with:
        ignore-unfixed: true
        severity: 'MEDIUM,HIGH,CRITICAL'
@@ -66,13 +66,13 @@ jobs:
        output: 'trivy-results-k3k.sarif'

    - name: Upload Trivy scan results to GitHub Security tab (k3k)
-     uses: github/codeql-action/upload-sarif@38e701f46e33fb233075bf4238cb1e5d68e429e4 # v3
+     uses: github/codeql-action/upload-sarif@5c8a8a642e79153f5d047b10ec1cba1d1cc65699 # v3
      with:
        sarif_file: trivy-results-k3k.sarif
        category: k3k

    - name: Run Trivy vulnerability scanner (k3k-kubelet)
-     uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # 0.33.1
+     uses: aquasecurity/trivy-action@97e0b3872f55f89b95b2f65b3dbab56962816478 # 0.34.2
      with:
        ignore-unfixed: true
        severity: 'MEDIUM,HIGH,CRITICAL'
@@ -82,7 +82,7 @@ jobs:
        output: 'trivy-results-k3k-kubelet.sarif'

    - name: Upload Trivy scan results to GitHub Security tab (k3k-kubelet)
-     uses: github/codeql-action/upload-sarif@38e701f46e33fb233075bf4238cb1e5d68e429e4 # v3
+     uses: github/codeql-action/upload-sarif@5c8a8a642e79153f5d047b10ec1cba1d1cc65699 # v3
      with:
        sarif_file: trivy-results-k3k-kubelet.sarif
        category: k3k-kubelet
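Every `uses:` bump in these workflows follows the same hardening pattern: the action is referenced by its full commit SHA instead of a mutable tag, with the human-readable version kept as a trailing comment so Renovate can still propose updates. A minimal sketch of the convention (using `actions/checkout`, whose pinned SHA appears later in this diff):

```yaml
# Sketch of the SHA-pinning convention applied throughout this changeset.
steps:
  # A tag like @v4 is mutable and can be re-pointed; a full commit SHA cannot.
  # The trailing "# v4" comment is what Renovate reads to bump the pin later.
  - name: Checkout
    uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
```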
.github/workflows/fossa.yml (vendored, 6 changes)

@@ -15,18 +15,18 @@ jobs:
    timeout-minutes: 30
    steps:
    - name: Checkout
-     uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
+     uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

    # The FOSSA token is shared between all repos in Rancher's GH org. It can be
    # used directly and there is no need to request specific access to EIO.
    - name: Read FOSSA token
-     uses: rancher-eio/read-vault-secrets@main
+     uses: rancher-eio/read-vault-secrets@0da85151ad1f19ed7986c41587e45aac1ace74b6 # v3
      with:
        secrets: |
          secret/data/github/org/rancher/fossa/push token | FOSSA_API_KEY_PUSH_ONLY

    - name: FOSSA scan
-     uses: fossas/fossa-action@main
+     uses: fossas/fossa-action@c414b9ad82eaad041e47a7cf62a4f02411f427a0 # v1.8.0
      with:
        api-key: ${{ env.FOSSA_API_KEY_PUSH_ONLY }}
        # Only runs the scan and do not provide/returns any results back to the
.github/workflows/release.yml (vendored, 11 changes)

@@ -37,9 +37,16 @@ jobs:

+   - name: Set up QEMU
+     uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
+     with:
+       image: tonistiigi/binfmt:qemu-v10.0.4-56
+
    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
+     with:
+       version: v0.30.1

    - name: "Read secrets"
-     uses: rancher-eio/read-vault-secrets@main
+     uses: rancher-eio/read-vault-secrets@0da85151ad1f19ed7986c41587e45aac1ace74b6 # v3
      if: github.repository_owner == 'rancher'
      with:
        secrets: |
@@ -55,7 +62,7 @@ jobs:
        echo "DOCKER_PASSWORD=${{ github.token }}" >> $GITHUB_ENV

    - name: Login to container registry
-     uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3
+     uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3
      with:
        registry: ${{ env.REGISTRY }}
        username: ${{ env.DOCKER_USERNAME }}
.github/workflows/renovate-vault.yml (vendored, 2 changes)

@@ -51,7 +51,7 @@ permissions:

 jobs:
   call-workflow:
-    uses: rancher/renovate-config/.github/workflows/renovate-vault.yml@release
+    uses: rancher/renovate-config/.github/workflows/renovate-vault.yml@20f34a3e3d54ab17f4fd5a3037edd62f58e26c7a # release
     with:
       configMigration: ${{ inputs.configMigration || 'true' }}
       logLevel: ${{ inputs.logLevel || 'info' }}
.github/workflows/test-conformance-shared.yaml (vendored, 22 changes)

@@ -8,6 +8,10 @@ on:
 permissions:
   contents: read

+env:
+  ARCH: amd64
+  KUBECTL_VERSION: v1.35.3
+
 jobs:
   conformance:
     runs-on: ubuntu-latest
@@ -18,7 +22,9 @@ jobs:
        type:
        - parallel
        - serial
-
+   env:
+     K3D_VERSION: v5.8.3
+     K3D_BIN_HASH_AMD64: "dbaa79a76ace7f4ca230a1ff41dc7d8a5036a8ad0309e9c54f9bf3836dbe853e"
    steps:
    - name: Checkout code
      uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
@@ -34,14 +40,18 @@ jobs:
      uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1

    - name: Install hydrophone
-     run: go install sigs.k8s.io/hydrophone@latest
+     run: go install sigs.k8s.io/hydrophone@3de3e886a2f6f09635d8b981c195490af1584d97 #v0.7.0

+   - name: Install k3d # taken from github.com/rancher/rancher/.github/workflows/integration-tests.yaml
+     run: |
+       curl -sSfL -o k3d "https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-${ARCH}"
+       echo "${K3D_BIN_HASH_AMD64} k3d" | shasum -a 256 --check
+       sudo mv k3d /usr/local/bin
+       sudo chmod +x /usr/local/bin/k3d
+
    - name: Install k3d and kubectl
      run: |
-       wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
-       k3d version
-
-       curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+       curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"

    - name: Setup Kubernetes (k3d)
      env:

(The following hunk belongs to a second conformance workflow whose file header was not captured.)

@@ -34,7 +34,7 @@ jobs:
      uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1

    - name: Install hydrophone
-     run: go install sigs.k8s.io/hydrophone@latest
+     run: go install sigs.k8s.io/hydrophone@3de3e886a2f6f09635d8b981c195490af1584d97 #v0.7.0

    - name: Install k3s
      env:
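The new k3d install step is worth calling out: instead of piping an install script from `main`, the binary is downloaded at a pinned version and its SHA-256 digest is checked against a hash committed to the workflow before it is moved into PATH. A commented sketch of the same pattern (commands taken from the diff above; `K3D_VERSION`, `K3D_BIN_HASH_AMD64`, and `ARCH` come from the env blocks introduced in this change):

```yaml
# Verify-before-install pattern, as introduced for k3d in this diff.
- name: Install k3d
  run: |
    # Fetch a specific release asset rather than running a remote install script.
    curl -sSfL -o k3d "https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-linux-${ARCH}"
    # Fail the job if the digest does not match the hash stored in the workflow.
    echo "${K3D_BIN_HASH_AMD64} k3d" | shasum -a 256 --check
    sudo mv k3d /usr/local/bin
    sudo chmod +x /usr/local/bin/k3d
```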
.github/workflows/test-e2e.yaml (vendored, 12 changes)

@@ -26,7 +26,7 @@ jobs:
        go-version-file: go.mod

    - name: Install Ginkgo
-     run: go install github.com/onsi/ginkgo/v2/ginkgo
+     run: go install github.com/onsi/ginkgo/v2/ginkgo@5d1d628ac86668c8f944c8c491c3d1ab86b3bed4 #v2.28.1

    - name: Setup environment
      run: |
@@ -64,14 +64,14 @@ jobs:
      run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out

    - name: Upload coverage reports to Codecov (controller)
-     uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
+     uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
      with:
        token: ${{ secrets.CODECOV_TOKEN }}
        files: ${GOCOVERDIR}/cover.out
        flags: controller

    - name: Upload coverage reports to Codecov (e2e)
-     uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
+     uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
      with:
        token: ${{ secrets.CODECOV_TOKEN }}
        files: ./cover.out
@@ -105,7 +105,7 @@ jobs:
        go-version-file: go.mod

    - name: Install Ginkgo
-     run: go install github.com/onsi/ginkgo/v2/ginkgo
+     run: go install github.com/onsi/ginkgo/v2/ginkgo@5d1d628ac86668c8f944c8c491c3d1ab86b3bed4 #v2.28.1

    - name: Setup environment
      run: |
@@ -143,14 +143,14 @@ jobs:
      run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out

    - name: Upload coverage reports to Codecov (controller)
-     uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
+     uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
      with:
        token: ${{ secrets.CODECOV_TOKEN }}
        files: ${GOCOVERDIR}/cover.out
        flags: controller

    - name: Upload coverage reports to Codecov (e2e)
-     uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
+     uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
      with:
        token: ${{ secrets.CODECOV_TOKEN }}
        files: ./cover.out
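The same pinning idea is applied to Go tools installed in CI: `go install` targets a specific commit rather than `@latest`, with the corresponding release tag noted in a comment. A sketch reusing the Ginkgo pin from this diff:

```yaml
# Sketch: pinning a Go tool in a workflow step, as done here for Ginkgo and hydrophone.
- name: Install Ginkgo
  # The commit SHA corresponds to the v2.28.1 tag, per the diff's own comment.
  run: go install github.com/onsi/ginkgo/v2/ginkgo@5d1d628ac86668c8f944c8c491c3d1ab86b3bed4 #v2.28.1
```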
.github/workflows/test.yaml (vendored, 6 changes)

@@ -26,7 +26,7 @@ jobs:
      run: make test-unit

    - name: Upload coverage reports to Codecov
-     uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
+     uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
      with:
        token: ${{ secrets.CODECOV_TOKEN }}
        files: ./cover.out
@@ -47,7 +47,7 @@ jobs:
        go-version-file: go.mod

    - name: Install Ginkgo
-     run: go install github.com/onsi/ginkgo/v2/ginkgo
+     run: go install github.com/onsi/ginkgo/v2/ginkgo@5d1d628ac86668c8f944c8c491c3d1ab86b3bed4 #v2.28.1

    - name: Setup environment
      run: |
@@ -78,7 +78,7 @@ jobs:
      run: go tool covdata textfmt -i=${{ github.workspace }}/covdata -o ${{ github.workspace }}/covdata/cover.out

    - name: Upload coverage reports to Codecov
-     uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
+     uses: codecov/codecov-action@75cd11691c0faa626561e295848008c8a7dddffe # v5
      with:
        token: ${{ secrets.CODECOV_TOKEN }}
        files: ${{ github.workspace }}/covdata/cover.out
.github/workflows/validate.yml (vendored, 2 changes)

@@ -29,7 +29,7 @@ jobs:
    - name: Run linters
      uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
      with:
-       version: v2.8.0
+       version: v2.11.3
        args: -v
        only-new-issues: true
        skip-cache: false
Makefile (30 changes)

@@ -5,8 +5,8 @@ VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*")

 ## Dependencies

-GOLANGCI_LINT_VERSION := v2.8.0
-GINKGO_VERSION ?= v2.21.0
+GOLANGCI_LINT_VERSION := v2.11.3
+GINKGO_VERSION ?= v2.28.1
 GINKGO_FLAGS ?= -v -r --coverprofile=cover.out --coverpkg=./...
 ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5
 ENVTEST_K8S_VERSION := 1.31.0
@@ -60,24 +60,32 @@ test: ## Run all the tests
	$(GINKGO) $(GINKGO_FLAGS) --label-filter=$(label-filter)

 .PHONY: test-unit
-test-unit: ## Run the unit tests (skips the e2e)
+test-unit: ## Run the unit tests (skips the e2e and integration tests)
	$(GINKGO) $(GINKGO_FLAGS) --skip-file=tests/*

-.PHONY: test-controller
-test-controller: ## Run the controller tests (pkg/controller)
-	$(GINKGO) $(GINKGO_FLAGS) pkg/controller
+.PHONY: test-kubelet
+test-kubelet: ## Run the k3k-kubelet controller tests (tests/integration/k3k-kubelet)
+	$(GINKGO) $(GINKGO_FLAGS) tests/integration/k3k-kubelet

-.PHONY: test-kubelet-controller
-test-kubelet-controller: ## Run the controller tests (pkg/controller)
-	$(GINKGO) $(GINKGO_FLAGS) k3k-kubelet/controller
+.PHONY: test-policy
+test-policy: ## Run the policy controller tests (tests/integration/policy)
+	$(GINKGO) $(GINKGO_FLAGS) tests/integration/policy
+
+.PHONY: test-cluster
+test-cluster: ## Run the cluster controller tests (tests/integration/cluster)
+	$(GINKGO) $(GINKGO_FLAGS) tests/integration/cluster
+
+.PHONY: test-integration
+test-integration: ## Run the controller tests that use envtest (tests/integration)
+	$(GINKGO) $(GINKGO_FLAGS) tests/integration

 .PHONY: test-e2e
 test-e2e: ## Run the e2e tests
-	$(GINKGO) $(GINKGO_FLAGS) --label-filter="$(E2E_LABEL_FILTER)" tests
+	$(GINKGO) $(GINKGO_FLAGS) --label-filter="$(E2E_LABEL_FILTER)" tests/e2e

 .PHONY: test-cli
 test-cli: ## Run the cli tests
-	$(GINKGO) $(GINKGO_FLAGS) --label-filter=cli --flake-attempts=3 tests
+	$(GINKGO) $(GINKGO_FLAGS) --flake-attempts=3 tests/cli

 .PHONY: generate
 generate: ## Generate the CRDs specs
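With the suites now split across `tests/integration/*`, `tests/e2e`, and `tests/cli`, a pipeline can invoke each target independently. A hypothetical CI fragment (not part of this diff; the target names come from the Makefile above, and overriding `E2E_LABEL_FILTER` on the make command line is an assumption):

```yaml
# Hypothetical workflow steps; make targets are from the Makefile above.
- name: Integration tests
  run: make test-integration
- name: E2E tests
  # Passing E2E_LABEL_FILTER as a command-line make variable is an assumption.
  run: make test-e2e E2E_LABEL_FILTER="e2e"
```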
@@ -67,7 +67,7 @@ To install it, simply download the latest available version for your architecture
 For example, you can download the Linux amd64 version with:

 ```
-wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.1/k3kcli-linux-amd64 && \
+wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.2/k3kcli-linux-amd64 && \
 chmod +x k3kcli && \
 sudo mv k3kcli /usr/local/bin
 ```
@@ -75,7 +75,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.1/k3kcli-linux-amd64 && \
 You should now be able to run:
 ```bash
 -> % k3kcli --version
-k3kcli version v1.0.1
+k3kcli version v1.0.2
 ```
@@ -2,5 +2,5 @@ apiVersion: v2
 name: k3k
 description: A Helm chart for K3K
 type: application
-version: 1.0.2-rc2
-appVersion: v1.0.2-rc2
+version: 1.0.2
+appVersion: v1.0.2
@@ -3,8 +3,8 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    helm.sh/resource-policy: keep
+    controller-gen.kubebuilder.io/version: v0.20.0
+    helm.sh/resource-policy: keep
   name: clusters.k3k.io
 spec:
   group: k3k.io
@@ -55,11 +55,9 @@ spec:
          description: Spec defines the desired state of the Cluster.
          properties:
            addons:
-             description: Addons specifies secrets containing raw YAML to deploy
-               on cluster startup.
+             description: Addons specifies secrets containing raw YAML to deploy on cluster startup.
              items:
-               description: Addon specifies a Secret containing YAML to be deployed
-                 on cluster startup.
+               description: Addon specifies a Secret containing YAML to be deployed on cluster startup.
                properties:
                  secretNamespace:
                    description: SecretNamespace is the namespace of the Secret.
@@ -77,11 +75,9 @@ spec:
                    type: string
                type: array
            agentEnvs:
-             description: AgentEnvs specifies list of environment variables to
-               set in the agent pod.
+             description: AgentEnvs specifies list of environment variables to set in the agent pod.
              items:
-               description: EnvVar represents an environment variable present in
-                 a Container.
+               description: EnvVar represents an environment variable present in a Container.
                properties:
                  name:
                    description: Name of the environment variable. Must be a C_IDENTIFIER.
@@ -99,8 +95,7 @@ spec:
                      Defaults to "".
                    type: string
                  valueFrom:
-                   description: Source for the environment variable's value. Cannot
-                     be used if value is not empty.
+                   description: Source for the environment variable's value. Cannot be used if value is not empty.
                    properties:
                      configMapKeyRef:
                        description: Selects a key of a ConfigMap.
@@ -118,8 +113,7 @@ spec:
                          More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                        type: string
                      optional:
-                       description: Specify whether the ConfigMap or its key
-                         must be defined
+                       description: Specify whether the ConfigMap or its key must be defined
                        type: boolean
                    required:
                    - key
@@ -131,12 +125,10 @@ spec:
                        spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
                      properties:
                        apiVersion:
-                         description: Version of the schema the FieldPath is
-                           written in terms of, defaults to "v1".
+                         description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
                          type: string
                        fieldPath:
-                         description: Path of the field to select in the specified
-                           API version.
+                         description: Path of the field to select in the specified API version.
                          type: string
                      required:
                      - fieldPath
@@ -148,15 +140,13 @@ spec:
                        (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
                      properties:
                        containerName:
-                         description: 'Container name: required for volumes,
-                           optional for env vars'
+                         description: 'Container name: required for volumes, optional for env vars'
                          type: string
                        divisor:
                          anyOf:
                          - type: integer
                          - type: string
-                         description: Specifies the output format of the exposed
-                           resources, defaults to "1"
+                         description: Specifies the output format of the exposed resources, defaults to "1"
                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                          x-kubernetes-int-or-string: true
                        resource:
@@ -170,8 +160,7 @@ spec:
                      description: Selects a key of a secret in the pod's namespace
                      properties:
                        key:
-                         description: The key of the secret to select from. Must
-                           be a valid secret key.
+                         description: The key of the secret to select from. Must be a valid secret key.
                          type: string
                        name:
                          default: ""
@@ -183,8 +172,7 @@ spec:
                          More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                        type: string
                      optional:
-                       description: Specify whether the Secret or its key must
-                         be defined
+                       description: Specify whether the Secret or its key must be defined
                        type: boolean
                    required:
                    - key
@@ -225,16 +213,14 @@ spec:
              - message: clusterDNS is immutable
                rule: self == oldSelf
            customCAs:
-             description: CustomCAs specifies the cert/key pairs for custom CA
-               certificates.
+             description: CustomCAs specifies the cert/key pairs for custom CA certificates.
              properties:
                enabled:
                  default: true
                  description: Enabled toggles this feature on or off.
                  type: boolean
                sources:
-                 description: Sources defines the sources for all required custom
-                   CA certificates.
+                 description: Sources defines the sources for all required custom CA certificates.
                  properties:
                    clientCA:
                      description: ClientCA specifies the client-ca cert/key pair.
@@ -249,8 +235,7 @@ spec:
                      - secretName
                      type: object
                    etcdPeerCA:
-                     description: ETCDPeerCA specifies the etcd-peer-ca cert/key
-                       pair.
+                     description: ETCDPeerCA specifies the etcd-peer-ca cert/key pair.
                      properties:
                        secretName:
                          description: |-
@@ -262,8 +247,7 @@ spec:
                      - secretName
                      type: object
                    etcdServerCA:
-                     description: ETCDServerCA specifies the etcd-server-ca cert/key
-                       pair.
+                     description: ETCDServerCA specifies the etcd-server-ca cert/key pair.
                      properties:
                        secretName:
                          description: |-
@@ -275,8 +259,7 @@ spec:
                      - secretName
                      type: object
                    requestHeaderCA:
-                     description: RequestHeaderCA specifies the request-header-ca
-                       cert/key pair.
+                     description: RequestHeaderCA specifies the request-header-ca cert/key pair.
                      properties:
                        secretName:
                          description: |-
@@ -300,8 +283,7 @@ spec:
                      - secretName
                      type: object
                    serviceAccountToken:
-                     description: ServiceAccountToken specifies the service-account-token
-                       key.
+                     description: ServiceAccountToken specifies the service-account-token key.
                      properties:
                        secretName:
                          description: |-
@@ -330,23 +312,19 @@ spec:
                By default, it's only exposed as a ClusterIP.
              properties:
                ingress:
-                 description: Ingress specifies options for exposing the API server
-                   through an Ingress.
+                 description: Ingress specifies options for exposing the API server through an Ingress.
                  properties:
                    annotations:
                      additionalProperties:
                        type: string
-                     description: Annotations specifies annotations to add to the
-                       Ingress.
+                     description: Annotations specifies annotations to add to the Ingress.
                      type: object
                    ingressClassName:
-                     description: IngressClassName specifies the IngressClass to
-                       use for the Ingress.
+                     description: IngressClassName specifies the IngressClass to use for the Ingress.
                      type: string
                  type: object
                loadBalancer:
-                 description: LoadBalancer specifies options for exposing the API
-                   server through a LoadBalancer service.
+                 description: LoadBalancer specifies options for exposing the API server through a LoadBalancer service.
                  properties:
                    etcdPort:
                      description: |-
@@ -364,8 +342,7 @@ spec:
                      type: integer
                  type: object
                nodePort:
-                 description: NodePort specifies options for exposing the API server
-                   through NodePort.
+                 description: NodePort specifies options for exposing the API server through NodePort.
                  properties:
                    etcdPort:
                      description: |-
@@ -384,10 +361,8 @@ spec:
                  type: object
              type: object
              x-kubernetes-validations:
-             - message: ingress, loadbalancer and nodePort are mutually exclusive;
-                 only one can be set
-               rule: '[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x,
-                 x).size() <= 1'
+             - message: ingress, loadbalancer and nodePort are mutually exclusive; only one can be set
+               rule: '[has(self.ingress), has(self.loadBalancer), has(self.nodePort)].filter(x, x).size() <= 1'
            mirrorHostNodes:
              description: |-
                MirrorHostNodes controls whether node objects from the host cluster
@@ -514,8 +489,7 @@ spec:
                    secret contents will be mounted.
                  type: string
                optional:
-                 description: optional field specify whether the Secret or its
-                   keys must be defined
+                 description: optional field specify whether the Secret or its keys must be defined
                  type: boolean
                role:
                  description: |-
@@ -547,11 +521,9 @@ spec:
                type: string
              type: array
            serverEnvs:
-             description: ServerEnvs specifies list of environment variables to
-               set in the server pod.
+             description: ServerEnvs specifies list of environment variables to set in the server pod.
              items:
-               description: EnvVar represents an environment variable present in
-                 a Container.
+               description: EnvVar represents an environment variable present in a Container.
                properties:
                  name:
                    description: Name of the environment variable. Must be a C_IDENTIFIER.
@@ -569,8 +541,7 @@ spec:
                      Defaults to "".
                    type: string
                  valueFrom:
-                   description: Source for the environment variable's value. Cannot
-                     be used if value is not empty.
+                   description: Source for the environment variable's value. Cannot be used if value is not empty.
                    properties:
                      configMapKeyRef:
                        description: Selects a key of a ConfigMap.
@@ -588,8 +559,7 @@ spec:
                          More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                        type: string
                      optional:
-                       description: Specify whether the ConfigMap or its key
-                         must be defined
+                       description: Specify whether the ConfigMap or its key must be defined
                        type: boolean
                    required:
                    - key
@@ -601,12 +571,10 @@ spec:
                        spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
                      properties:
                        apiVersion:
-                         description: Version of the schema the FieldPath is
-                           written in terms of, defaults to "v1".
+                         description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
                          type: string
                        fieldPath:
-                         description: Path of the field to select in the specified
-                           API version.
+                         description: Path of the field to select in the specified API version.
                          type: string
                      required:
                      - fieldPath
@@ -618,15 +586,13 @@ spec:
                        (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
                      properties:
                        containerName:
-                         description: 'Container name: required for volumes,
-                           optional for env vars'
+                         description: 'Container name: required for volumes, optional for env vars'
                          type: string
                        divisor:
                          anyOf:
                          - type: integer
                          - type: string
-                         description: Specifies the output format of the exposed
-                           resources, defaults to "1"
+                         description: Specifies the output format of the exposed resources, defaults to "1"
                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                          x-kubernetes-int-or-string: true
                        resource:
@@ -640,8 +606,7 @@ spec:
                      description: Selects a key of a secret in the pod's namespace
                      properties:
                        key:
-                         description: The key of the secret to select from. Must
-                           be a valid secret key.
+                         description: The key of the secret to select from. Must be a valid secret key.
                          type: string
                        name:
                          default: ""
@@ -653,8 +618,7 @@ spec:
                          More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                        type: string
                      optional:
-                       description: Specify whether the Secret or its key must
-                         be defined
+                       description: Specify whether the Secret or its key must be defined
                        type: boolean
                    required:
                    - key
@@ -695,8 +659,7 @@ spec:
                rule: self == oldSelf
            sync:
              default: {}
-             description: Sync specifies the resources types that will be synced
-               from virtual cluster to host cluster.
+             description: Sync specifies the resources types that will be synced from virtual cluster to host cluster.
              properties:
                configMaps:
                  default:
@@ -812,8 +775,7 @@ spec:
                  type: object
              type: object
            tlsSANs:
-             description: TLSSANs specifies subject alternative names for the K3s
-               server certificate.
+             description: TLSSANs specifies subject alternative names for the K3s server certificate.
              items:
                type: string
              type: array
@@ -823,12 +785,10 @@ spec:
                The Secret must have a "token" field in its data.
              properties:
                name:
-                 description: name is unique within a namespace to reference a
-                   secret resource.
+                 description: name is unique within a namespace to reference a secret resource.
                  type: string
                namespace:
-                 description: namespace defines the space within which the secret
-                   name must be unique.
+                 description: namespace defines the space within which the secret name must be unique.
                  type: string
              type: object
              x-kubernetes-map-type: atomic
@@ -859,11 +819,9 @@ spec:
              description: ClusterDNS is the IP address for the CoreDNS service.
              type: string
            conditions:
-             description: Conditions are the individual conditions for the cluster
-               set.
+             description: Conditions are the individual conditions for the cluster set.
              items:
-               description: Condition contains details for one aspect of the current
-                 state of this API Resource.
+               description: Condition contains details for one aspect of the current state of this API Resource.
                properties:
                  lastTransitionTime:
                    description: |-
@@ -920,13 +878,11 @@ spec:
              description: HostVersion is the Kubernetes version of the host node.
              type: string
            kubeletPort:
-             description: KubeletPort specefies the port used by k3k-kubelet in
-               shared mode.
+             description: KubeletPort specefies the port used by k3k-kubelet in shared mode.
              type: integer
            phase:
              default: Unknown
-             description: Phase is a high-level summary of the cluster's current
-               lifecycle state.
+             description: Phase is a high-level summary of the cluster's current lifecycle state.
              enum:
              - Pending
              - Provisioning
@@ -935,22 +891,39 @@ spec:
              - Terminating
              - Unknown
              type: string
+           policy:
+             description: |-
+               policy represents the status of the policy applied to this cluster.
+               This field is set by the VirtualClusterPolicy controller.
+             properties:
+               name:
+                 description: name is the name of the VirtualClusterPolicy currently applied to this cluster.
+                 minLength: 1
+                 type: string
+               nodeSelector:
+                 additionalProperties:
+                   type: string
+                 description: nodeSelector is a node selector enforced by the active VirtualClusterPolicy.
+                 type: object
+               priorityClass:
+                 description: priorityClass is the priority class enforced by the active VirtualClusterPolicy.
+                 type: string
+             required:
+             - name
+             type: object
            policyName:
-             description: PolicyName specifies the virtual cluster policy name
-               bound to the virtual cluster.
+             description: PolicyName specifies the virtual cluster policy name bound to the virtual cluster.
              type: string
            serviceCIDR:
              description: ServiceCIDR is the CIDR range for service IPs.
              type: string
            tlsSANs:
-             description: TLSSANs specifies subject alternative names for the K3s
-               server certificate.
+             description: TLSSANs specifies subject alternative names for the K3s server certificate.
              items:
                type: string
              type: array
            webhookPort:
-             description: WebhookPort specefies the port used by webhook in k3k-kubelet
-               in shared mode.
+             description: WebhookPort specefies the port used by webhook in k3k-kubelet in shared mode.
              type: integer
          type: object
      type: object
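For reference, the new `status.policy` block reported by the VirtualClusterPolicy controller would look roughly like this on a Cluster object (an illustrative sketch; the field names come from the schema added above, the values are made up):

```yaml
# Illustrative Cluster status fragment; values are hypothetical.
status:
  policy:
    name: my-policy                 # required, minLength: 1
    priorityClass: low-priority     # enforced by the active VirtualClusterPolicy
    nodeSelector:
      kubernetes.io/os: linux
  policyName: my-policy             # older field, still present in the schema
```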
@@ -3,8 +3,8 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    helm.sh/resource-policy: keep
+    controller-gen.kubebuilder.io/version: v0.20.0
+    helm.sh/resource-policy: keep
   name: virtualclusterpolicies.k3k.io
 spec:
   group: k3k.io
@@ -51,8 +51,7 @@ spec:
          properties:
            allowedMode:
              default: shared
-             description: AllowedMode specifies the allowed cluster provisioning
-               mode. Defaults to "shared".
+             description: AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared".
              enum:
              - shared
              - virtual
@@ -63,16 +62,13 @@ spec:
            defaultNodeSelector:
              additionalProperties:
                type: string
-             description: DefaultNodeSelector specifies the node selector that
-               applies to all clusters (server + agent) in the target Namespace.
+             description: DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace.
              type: object
            defaultPriorityClass:
-             description: DefaultPriorityClass specifies the priorityClassName
-               applied to all pods of all clusters in the target Namespace.
+             description: DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace.
              type: string
            disableNetworkPolicy:
-             description: DisableNetworkPolicy indicates whether to disable the
-               creation of a default network policy for cluster isolation.
+             description: DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation.
              type: boolean
            limit:
              description: |-
@@ -80,11 +76,9 @@ spec:
                to set defaults and constraints (min/max)
              properties:
                limits:
-                 description: Limits is the list of LimitRangeItem objects that
-                   are enforced.
+                 description: Limits is the list of LimitRangeItem objects that are enforced.
                  items:
-                   description: LimitRangeItem defines a min/max usage limit for
-                     any resource that matches on kind.
+                   description: LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
                    properties:
                      default:
                        additionalProperties:
@@ -93,8 +87,7 @@ spec:
                          - type: string
                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                          x-kubernetes-int-or-string: true
-                       description: Default resource requirement limit value by
-                         resource name if resource limit is omitted.
+                       description: Default resource requirement limit value by resource name if resource limit is omitted.
                        type: object
                      defaultRequest:
                        additionalProperties:
@@ -103,9 +96,7 @@ spec:
                          - type: string
                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                          x-kubernetes-int-or-string: true
-                       description: DefaultRequest is the default resource requirement
-                         request value by resource name if resource request is
-                         omitted.
+                       description: DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
                        type: object
                      max:
                        additionalProperties:
@@ -114,8 +105,7 @@ spec:
                          - type: string
                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                          x-kubernetes-int-or-string: true
-                       description: Max usage constraints on this kind by resource
-                         name.
+                       description: Max usage constraints on this kind by resource name.
                        type: object
                      maxLimitRequestRatio:
                        additionalProperties:
@@ -124,11 +114,7 @@ spec:
                          - type: string
                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                          x-kubernetes-int-or-string: true
-                       description: MaxLimitRequestRatio if specified, the named
-                         resource must have a request and limit that are both non-zero
-                         where limit divided by request is less than or equal to
-                         the enumerated value; this represents the max burst for
-                         the named resource.
+                       description: MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
                        type: object
                      min:
                        additionalProperties:
@@ -137,8 +123,7 @@ spec:
                          - type: string
                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                          x-kubernetes-int-or-string: true
-                       description: Min usage constraints on this kind by resource
-                         name.
+                       description: Min usage constraints on this kind by resource name.
                        type: object
                      type:
                        description: Type of resource that this limit applies to.
@@ -152,16 +137,14 @@ spec:
              - limits
              type: object
            podSecurityAdmissionLevel:
-             description: PodSecurityAdmissionLevel specifies the pod security
-               admission level applied to the pods in the namespace.
+             description: PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace.
              enum:
              - privileged
              - baseline
              - restricted
              type: string
            quota:
-             description: Quota specifies the resource limits for clusters within
-               a clusterpolicy.
+             description: Quota specifies the resource limits for clusters within a clusterpolicy.
              properties:
                hard:
                  additionalProperties:
@@ -181,8 +164,7 @@ spec:
                    For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
                  properties:
                    matchExpressions:
-                     description: A list of scope selector requirements by scope
-                       of the resources.
+                     description: A list of scope selector requirements by scope of the resources.
                      items:
                        description: |-
                          A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
@@ -194,8 +176,7 @@ spec:
                            Valid operators are In, NotIn, Exists, DoesNotExist.
                          type: string
                        scopeName:
-                         description: The name of the scope that the selector
-                           applies to.
+                         description: The name of the scope that the selector applies to.
                          type: string
                        values:
                          description: |-
@@ -220,16 +201,14 @@ spec:
                    A collection of filters that must match each object tracked by a quota.
                    If not specified, the quota matches all objects.
                  items:
-                   description: A ResourceQuotaScope defines a filter that must
-                     match each object tracked by a quota
+                   description: A ResourceQuotaScope defines a filter that must match each object tracked by a quota
                    type: string
                  type: array
                  x-kubernetes-list-type: atomic
              type: object
            sync:
              default: {}
-             description: Sync specifies the resources types that will be synced
-               from virtual cluster to host cluster.
+             description: Sync specifies the resources types that will be synced from virtual cluster to host cluster.
              properties:
                configMaps:
                  default:
@@ -349,11 +328,9 @@ spec:
          description: Status reflects the observed state of the VirtualClusterPolicy.
          properties:
            conditions:
-             description: Conditions are the individual conditions for the cluster
-               set.
+             description: Conditions are the individual conditions for the cluster set.
              items:
-               description: Condition contains details for one aspect of the current
-                 state of this API Resource.
+               description: Condition contains details for one aspect of the current state of this API Resource.
                properties:
                  lastTransitionTime:
                    description: |-
@@ -407,12 +384,10 @@ spec:
                type: object
              type: array
            lastUpdateTime:
-             description: LastUpdate is the timestamp when the status was last
-               updated.
+             description: LastUpdate is the timestamp when the status was last updated.
              type: string
            observedGeneration:
-             description: ObservedGeneration was the generation at the time the
-               status was updated.
+             description: ObservedGeneration was the generation at the time the status was updated.
              format: int64
              type: integer
            summary:
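To make the policy schema concrete, here is an illustrative VirtualClusterPolicy manifest; the field names and enums come from the schema above, the apiVersion matches the `v1beta1` anchors in the generated docs below, and all values are made up:

```yaml
# Illustrative only — values are hypothetical.
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
  name: example-policy
spec:
  allowedMode: shared                  # enum: shared | virtual (default: shared)
  defaultPriorityClass: low-priority   # applied to all pods of all clusters in the Namespace
  defaultNodeSelector:
    kubernetes.io/os: linux
  podSecurityAdmissionLevel: baseline  # enum: privileged | baseline | restricted
  disableNetworkPolicy: false
```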
@@ -41,6 +41,29 @@ _Appears In:_
 |===


+[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-appliedpolicy"]
+=== AppliedPolicy
+
+AppliedPolicy defines the observed state of an applied policy.
+
+_Appears In:_
+
+* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterstatus[$$ClusterStatus$$]
+
+[cols="25a,55a,10a,10a", options="header"]
+|===
+| Field | Description | Default | Validation
+| *`name`* __string__ | name is the name of the VirtualClusterPolicy currently applied to this cluster. + | | MinLength: 1 +
+| *`priorityClass`* __string__ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. + | |
+| *`nodeSelector`* __object (keys:string, values:string)__ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. + | |
+|===
+
+
 [id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster"]
 === Cluster
@@ -32,6 +32,24 @@ _Appears in:_
 | `secretRef` _string_ | SecretRef is the name of the Secret. | | |


+#### AppliedPolicy
+
+AppliedPolicy defines the observed state of an applied policy.
+
+_Appears in:_
+- [ClusterStatus](#clusterstatus)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `name` _string_ | name is the name of the VirtualClusterPolicy currently applied to this cluster. | | MinLength: 1 <br /> |
+| `priorityClass` _string_ | priorityClass is the priority class enforced by the active VirtualClusterPolicy. | | |
+| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is a node selector enforced by the active VirtualClusterPolicy. | | |
+
+
 #### Cluster
@@ -51,9 +51,11 @@ To see all the available Make commands you can run `make help`, i.e:
   package                  Package the k3k and k3k-kubelet Docker images
   push                     Push the K3k images to the registry
   test                     Run all the tests
-  test-unit                Run the unit tests (skips the e2e)
-  test-controller          Run the controller tests (pkg/controller)
-  test-kubelet-controller  Run the controller tests (pkg/controller)
+  test-unit                Run the unit tests (skips the e2e and integration tests)
+  test-kubelet             Run the k3k-kubelet controller tests (tests/integration/k3k-kubelet)
+  test-policy              Run the policy controller tests (tests/integration/policy)
+  test-cluster             Run the cluster controller tests (tests/integration/cluster)
+  test-integration         Run the controller tests (pkg/controller)
   test-e2e                 Run the e2e tests
   test-cli                 Run the cli tests
   generate                 Generate the CRDs specs
go.mod (60 changes)

@@ -1,8 +1,8 @@
 module github.com/rancher/k3k

-go 1.25
+go 1.25.0

-toolchain go1.25.6
+toolchain go1.25.8

 require (
 	github.com/blang/semver/v4 v4.0.0
@@ -17,8 +17,8 @@ require (
 	github.com/spf13/pflag v1.0.10
 	github.com/spf13/viper v1.21.0
 	github.com/stretchr/testify v1.11.1
-	github.com/testcontainers/testcontainers-go v0.40.0
-	github.com/testcontainers/testcontainers-go/modules/k3s v0.40.0
+	github.com/testcontainers/testcontainers-go v0.41.0
+	github.com/testcontainers/testcontainers-go/modules/k3s v0.41.0
 	github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803
 	go.etcd.io/etcd/api/v3 v3.5.21
 	go.etcd.io/etcd/client/v3 v3.5.21
@@ -41,7 +41,7 @@ require (
 )

 require (
-	cel.dev/expr v0.19.1 // indirect
+	cel.dev/expr v0.25.1 // indirect
 	dario.cat/mergo v1.0.2 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
 	github.com/BurntSushi/toml v1.5.0 // indirect
@@ -70,10 +70,10 @@ require (
 	github.com/cyphar/filepath-securejoin v0.5.1 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/docker v28.5.1+incompatible // indirect
+	github.com/docker/docker v28.5.2+incompatible // indirect
 	github.com/docker/go-connections v0.6.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
-	github.com/ebitengine/purego v0.8.4 // indirect
+	github.com/ebitengine/purego v0.10.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch v5.9.11+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
@@ -114,7 +114,7 @@ require (
 	github.com/jmoiron/sqlx v1.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.18.0 // indirect
+	github.com/klauspost/compress v1.18.2 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
 	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
@@ -130,7 +130,7 @@ require (
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
-	github.com/moby/go-archive v0.1.0 // indirect
+	github.com/moby/go-archive v0.2.0 // indirect
 	github.com/moby/patternmatcher v0.6.0 // indirect
 	github.com/moby/spdystream v0.5.0 // indirect
 	github.com/moby/sys/sequential v0.6.0 // indirect
@@ -149,7 +149,7 @@ require (
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
 	github.com/prometheus/client_golang v1.22.0 // indirect
 	github.com/prometheus/client_model v0.6.2
 	github.com/prometheus/common v0.64.0 // indirect
@@ -158,48 +158,48 @@ require (
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sagikazarmark/locafero v0.11.0 // indirect
 	github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
-	github.com/shirou/gopsutil/v4 v4.25.6 // indirect
+	github.com/shirou/gopsutil/v4 v4.26.2 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
 	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
 	github.com/spf13/afero v1.15.0 // indirect
 	github.com/spf13/cast v1.10.0 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
-	github.com/tklauser/go-sysconf v0.3.12 // indirect
-	github.com/tklauser/numcpus v0.6.1 // indirect
+	github.com/tklauser/go-sysconf v0.3.16 // indirect
+	github.com/tklauser/numcpus v0.11.0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xlab/treeprint v1.2.0 // indirect
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
 	go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
-	go.opentelemetry.io/otel v1.35.0 // indirect
+	go.opentelemetry.io/otel v1.41.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
-	go.opentelemetry.io/otel/metric v1.35.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.33.0 // indirect
-	go.opentelemetry.io/otel/trace v1.35.0 // indirect
+	go.opentelemetry.io/otel/metric v1.41.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.41.0 // indirect
+	go.opentelemetry.io/otel/trace v1.41.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.4.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.yaml.in/yaml/v2 v2.4.2 // indirect
 	go.yaml.in/yaml/v3 v3.0.4 // indirect
-	golang.org/x/crypto v0.45.0 // indirect
+	golang.org/x/crypto v0.48.0 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-	golang.org/x/net v0.47.0 // indirect
-	golang.org/x/oauth2 v0.30.0 // indirect
-	golang.org/x/sync v0.18.0 // indirect
-	golang.org/x/sys v0.38.0 // indirect
-	golang.org/x/term v0.37.0 // indirect
-	golang.org/x/text v0.31.0 // indirect
+	golang.org/x/net v0.49.0 // indirect
+	golang.org/x/oauth2 v0.34.0 // indirect
+	golang.org/x/sync v0.19.0 // indirect
+	golang.org/x/sys v0.41.0 // indirect
+	golang.org/x/term v0.40.0 // indirect
+	golang.org/x/text v0.34.0 // indirect
 	golang.org/x/time v0.12.0 // indirect
-	golang.org/x/tools v0.38.0 // indirect
+	golang.org/x/tools v0.41.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
-	google.golang.org/grpc v1.68.1 // indirect
-	google.golang.org/protobuf v1.36.6 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
+	google.golang.org/grpc v1.79.3 // indirect
+	google.golang.org/protobuf v1.36.10 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
134
go.sum
134
go.sum
@@ -1,5 +1,5 @@
|
||||
cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4=
|
||||
cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
|
||||
cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=
|
||||
cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
||||
@@ -44,6 +44,8 @@ github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuP
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
@@ -86,8 +88,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
|
||||
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM=
|
||||
github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
@@ -100,8 +102,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -243,8 +245,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -290,8 +292,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8=
github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
@@ -338,8 +340,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
@@ -359,8 +361,8 @@ github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb
github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ=
github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM=
github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o=
github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -371,8 +373,8 @@ github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEV
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs=
github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c=
github.com/shirou/gopsutil/v4 v4.26.2 h1:X8i6sicvUFih4BmYIGT1m2wwgw2VG9YgrDTi7cIRGUI=
github.com/shirou/gopsutil/v4 v4.26.2/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
@@ -410,14 +412,14 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU=
github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY=
github.com/testcontainers/testcontainers-go/modules/k3s v0.40.0 h1:3w6SjtIp/+FdpjWJCyPqaGWknG2iU6MacEWA7hl0IqQ=
github.com/testcontainers/testcontainers-go/modules/k3s v0.40.0/go.mod h1:1xJwmfO2g+XKox9LiJXKGCm1vWp7LozX+78UjXVRbF0=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/testcontainers/testcontainers-go v0.41.0 h1:mfpsD0D36YgkxGj2LrIyxuwQ9i2wCKAD+ESsYM1wais=
github.com/testcontainers/testcontainers-go v0.41.0/go.mod h1:pdFrEIfaPl24zmBjerWTTYaY0M6UHsqA1YSvsoU40MI=
github.com/testcontainers/testcontainers-go/modules/k3s v0.41.0 h1:xvnllztzVajAMUmeb1BD6XQNacoY4gNEN6Gl1fhzUMI=
github.com/testcontainers/testcontainers-go/modules/k3s v0.41.0/go.mod h1:y9YF71J/D1tIoIY09dmtwEXPiHmuvntbK+MWuypq8OQ=
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/virtual-kubelet/virtual-kubelet v1.11.1-0.20250530103808-c9f64e872803 h1:0O149bxUoQL69b4+pcGaCbKk2bvA/43AhkczkDuRjMc=
@@ -450,8 +452,8 @@ go.etcd.io/etcd/server/v3 v3.5.21 h1:9w0/k12majtgarGmlMVuhwXRI2ob3/d1Ik3X5TKo0yU
go.etcd.io/etcd/server/v3 v3.5.21/go.mod h1:G1mOzdwuzKT1VRL7SqRchli/qcFrtLBTAQ4lV20sXXo=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
@@ -460,8 +462,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel v1.41.0 h1:YlEwVsGAlCvczDILpUXpIpPSL/VPugt7zHThEMLce1c=
go.opentelemetry.io/otel v1.41.0/go.mod h1:Yt4UwgEKeT05QbLwbyHXEwhnjxNO6D8L5PQP51/46dE=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
@@ -474,8 +476,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 h1:inYW9ZhgqiDqh6BioM7DVHHzEGVq76Db5897WLGZ5Go=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0/go.mod h1:Izur+Wt8gClgMJqO/cZ8wdeeMryJ/xxiOVgFSSfpDTY=
go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU=
go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU=
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU=
@@ -486,16 +488,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
go.opentelemetry.io/otel/metric v1.41.0 h1:rFnDcs4gRzBcsO9tS8LCpgR0dxg4aaxWlJxCno7JlTQ=
go.opentelemetry.io/otel/metric v1.41.0/go.mod h1:xPvCwd9pU0VN8tPZYzDZV/BMj9CM9vs00GuBjeKhJps=
go.opentelemetry.io/otel/sdk v1.41.0 h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8=
go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90=
go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.opentelemetry.io/otel/sdk/metric v1.41.0 h1:siZQIYBAUd1rlIWQT2uCxWJxcCO7q3TriaMlf08rXw8=
go.opentelemetry.io/otel/sdk/metric v1.41.0/go.mod h1:HNBuSvT7ROaGtGI50ArdRLUnvRTRGniSUZbxiWxSO8Y=
go.opentelemetry.io/otel/trace v1.41.0 h1:Vbk2co6bhj8L59ZJ6/xFTskY+tGAbOnCtQGVVa9TIN0=
go.opentelemetry.io/otel/trace v1.41.0/go.mod h1:U1NU4ULCoxeDKc09yCWdWe+3QoyweJcISEVa1RBzOis=
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -511,8 +513,8 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
@@ -521,8 +523,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -532,18 +534,18 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -555,16 +557,14 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -575,14 +575,16 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -590,17 +592,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE=
google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -610,8 +612,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -100,7 +100,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)

    syncedConfigMap := c.translateConfigMap(&virtualConfigMap)

    if err := controllerutil.SetControllerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
    if err := controllerutil.SetOwnerReference(&cluster, syncedConfigMap, c.HostClient.Scheme()); err != nil {
        return reconcile.Result{}, err
    }

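This hunk, and the matching ones below for the Ingress, PVC, PriorityClass, Secret, and Service syncers, replace controllerutil.SetControllerReference with controllerutil.SetOwnerReference. A brief, hedged note on the difference (the helper below is hypothetical, shown only for illustration): SetControllerReference marks the owner as the managing controller (controller: true) and fails if a different controller owner is already present, while SetOwnerReference appends a plain owner reference, so several owners can coexist and garbage collection still cascades when an owner is deleted.

import (
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// setNonExclusiveOwner sketches the call the syncers now make: a plain,
// non-controller owner reference that tolerates additional owners.
func setNonExclusiveOwner(owner, obj client.Object, scheme *runtime.Scheme) error {
    return controllerutil.SetOwnerReference(owner, obj, scheme)
}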
@@ -98,7 +98,7 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request

    syncedIngress := r.ingress(&virtIngress)

    if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
    if err := controllerutil.SetOwnerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
        return reconcile.Result{}, err
    }


@@ -98,7 +98,7 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
    }

    syncedPVC := r.pvc(&virtPVC)
    if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
    if err := controllerutil.SetOwnerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
        return reconcile.Result{}, err
    }

@@ -131,8 +131,19 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
        }
    }

    var currentHostPVC v1.PersistentVolumeClaim

    err := r.HostClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(syncedPVC), &currentHostPVC)
    if err == nil {
        log.V(1).Info("persistent volume claim already exists in the host cluster")
    }

    if !apierrors.IsNotFound(err) {
        return reconcile.Result{}, err
    }

    // create the pvc on host
    log.Info("creating the persistent volume claim for the first time on the host cluster")
    log.Info("creating the persistent volume claim for the first time in the host cluster")

    // note that we don't need to update the PVC on the host cluster; we only sync it
    // so that it can be handled by the host cluster.

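The PVC hunk above introduces a get-before-create flow against the host cluster: a nil Get error means the claim already exists, any error other than NotFound aborts the reconcile, and only NotFound falls through to creation. A minimal sketch of the same pattern, with a hypothetical helper name:

import (
    "context"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// createIfAbsent mirrors the flow above: nil means the object already exists on
// the host and nothing is done; non-NotFound errors propagate; NotFound creates it.
func createIfAbsent(ctx context.Context, c client.Client, obj client.Object) error {
    existing := obj.DeepCopyObject().(client.Object)

    err := c.Get(ctx, client.ObjectKeyFromObject(obj), existing)
    if err == nil {
        return nil // already present on the host cluster
    }

    if !apierrors.IsNotFound(err) {
        return err
    }

    return c.Create(ctx, obj)
}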
@@ -117,7 +117,7 @@ func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Reque

    hostPriorityClass := r.translatePriorityClass(priorityClass)

    if err := controllerutil.SetControllerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
    if err := controllerutil.SetOwnerReference(&cluster, hostPriorityClass, r.HostClient.Scheme()); err != nil {
        return reconcile.Result{}, err
    }


@@ -100,7 +100,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re

    syncedSecret := s.translateSecret(&virtualSecret)

    if err := controllerutil.SetControllerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
    if err := controllerutil.SetOwnerReference(&cluster, syncedSecret, s.HostClient.Scheme()); err != nil {
        return reconcile.Result{}, err
    }

@@ -76,7 +76,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request

    syncedService := r.service(&virtService)

    if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
    if err := controllerutil.SetOwnerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
        return reconcile.Result{}, err
    }


@@ -276,7 +276,7 @@ func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
        cfg.AgentHostname,
        k.port,
        k.agentIP,
        utilProvider.HostClient,
        k.hostMgr,
        utilProvider.VirtualClient,
        k.virtualCluster,
        cfg.Version,
@@ -327,8 +327,10 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
        return err != nil
    }, func() error {
        var err error

        b, err = bootstrap.DecodedBootstrap(token, endpoint)
        logger.Error(err, "decoded bootstrap")

        return err
    }); err != nil {
        return nil, errors.New("unable to decode bootstrap: " + err.Error())
@@ -388,7 +390,9 @@ func loadTLSConfig(clusterName, clusterNamespace, nodeName, hostname, token, age
        return err != nil
    }, func() error {
        var err error

        b, err = bootstrap.DecodedBootstrap(token, endpoint)

        return err
    }); err != nil {
        return nil, errors.New("unable to decode bootstrap: " + err.Error())

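Both retry hunks above add a standalone `var err error` before assigning inside the closure. The pitfall being avoided, sketched as a tiny runnable program (hedged; variable names are illustrative): using `:=` inside the closure would declare a fresh `b`, so the outer `b` would stay empty after the retry loop.

package main

import "fmt"

func main() {
    var b string
    var err error

    func() {
        // Bug variant: ':=' declares new b and err scoped to this closure,
        // so the outer b is never written.
        b, err := "decoded", error(nil)
        _, _ = b, err
    }()
    fmt.Printf("outer b after ':=' closure: %q\n", b) // ""

    func() {
        // Fixed variant (what the hunks do): with err pre-declared, plain '='
        // assigns the outer b.
        b, err = "decoded", nil
    }()
    _ = err
    fmt.Printf("outer b after '=' closure: %q\n", b) // "decoded"
}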
@@ -6,6 +6,7 @@ import (
    "github.com/go-logr/logr"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/manager"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -13,13 +14,13 @@ import (
    "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, hostClient client.Client, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, hostMgr manager.Manager, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
    ctx := context.Background()

    if mirrorHostNodes {
        var hostNode corev1.Node
        if err := hostClient.Get(ctx, types.NamespacedName{Name: node.Name}, &hostNode); err != nil {
            logger.Error(err, "error getting host node for mirroring", err)
        if err := hostMgr.GetAPIReader().Get(ctx, types.NamespacedName{Name: node.Name}, &hostNode); err != nil {
            logger.Error(err, "error getting host node for mirroring")
        }

        node.Spec = *hostNode.Spec.DeepCopy()
@@ -48,7 +49,7 @@ func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servi
        // configure versions
        node.Status.NodeInfo.KubeletVersion = version

        startNodeCapacityUpdater(ctx, logger, hostClient, virtualClient, virtualCluster, node.Name)
        startNodeCapacityUpdater(ctx, logger, hostMgr.GetClient(), virtualClient, virtualCluster, node.Name)
    }
}

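The ConfigureNode hunk swaps the cached host client for hostMgr.GetAPIReader() when mirroring the host node. A short, hedged note: a controller-runtime manager's GetAPIReader returns a client.Reader that talks directly to the API server, so it works even before the manager's informer cache has started or synced, which is the usual reason to prefer it in early setup paths. Minimal sketch with a hypothetical helper:

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/manager"
)

// fetchHostNode reads the node directly from the API server, bypassing the
// informer cache, so it is safe to call before the manager's cache is ready.
func fetchHostNode(ctx context.Context, mgr manager.Manager, name string) (*corev1.Node, error) {
    var node corev1.Node
    if err := mgr.GetAPIReader().Get(ctx, types.NamespacedName{Name: name}, &node); err != nil {
        return nil, err
    }

    return &node, nil
}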
@@ -2,6 +2,7 @@ package provider

import (
    "context"
    "maps"
    "sort"
    "time"

@@ -83,11 +84,30 @@ func updateNodeCapacity(ctx context.Context, logger logr.Logger, hostClient clie

    mergedResourceLists := mergeResourceLists(resourceLists...)

    m, err := distributeQuotas(ctx, logger, virtualClient, mergedResourceLists)
    if err != nil {
        logger.Error(err, "error distributing policy quota")
    var virtualNodeList, hostNodeList corev1.NodeList

    if err := virtualClient.List(ctx, &virtualNodeList); err != nil {
        logger.Error(err, "error listing virtual nodes for stable capacity distribution")
    }

    virtResourceMap := make(map[string]corev1.ResourceList)
    for _, vNode := range virtualNodeList.Items {
        virtResourceMap[vNode.Name] = corev1.ResourceList{}
    }

    if err := hostClient.List(ctx, &hostNodeList); err != nil {
        logger.Error(err, "error listing host nodes for stable capacity distribution")
    }

    hostResourceMap := make(map[string]corev1.ResourceList)

    for _, hNode := range hostNodeList.Items {
        if _, ok := virtResourceMap[hNode.Name]; ok {
            hostResourceMap[hNode.Name] = hNode.Status.Allocatable
        }
    }

    m := distributeQuotas(hostResourceMap, virtResourceMap, mergedResourceLists)
        allocatable = m[virtualNodeName]
    }

@@ -125,76 +145,99 @@ func mergeResourceLists(resourceLists ...corev1.ResourceList) corev1.ResourceLis
    return merged
}

// distributeQuotas divides the total resource quotas evenly among all active virtual nodes.
// This ensures that each virtual node reports a fair share of the available resources,
// preventing the scheduler from overloading a single node.
// distributeQuotas divides the total resource quotas among all active virtual nodes,
// capped by each node's actual host capacity. This ensures that each virtual node
// reports a fair share of the available resources without exceeding what its
// underlying host node can provide.
//
// The algorithm iterates over each resource, divides it as evenly as possible among the
// sorted virtual nodes, and distributes any remainder to the first few nodes to ensure
// all resources are allocated. Sorting the nodes by name guarantees a deterministic
// distribution.
func distributeQuotas(ctx context.Context, logger logr.Logger, virtualClient client.Client, quotas corev1.ResourceList) (map[string]corev1.ResourceList, error) {
    // List all virtual nodes to distribute the quota stably.
    var virtualNodeList corev1.NodeList
    if err := virtualClient.List(ctx, &virtualNodeList); err != nil {
        logger.Error(err, "error listing virtual nodes for stable capacity distribution, falling back to full quota")
        return nil, err
    }

    // If there are no virtual nodes, there's nothing to distribute.
    numNodes := int64(len(virtualNodeList.Items))
    if numNodes == 0 {
        logger.Info("error listing virtual nodes for stable capacity distribution, falling back to full quota")
        return nil, nil
    }

    // Sort nodes by name for a deterministic distribution of resources.
    sort.Slice(virtualNodeList.Items, func(i, j int) bool {
        return virtualNodeList.Items[i].Name < virtualNodeList.Items[j].Name
    })

    // Initialize the resource map for each virtual node.
    resourceMap := make(map[string]corev1.ResourceList)
    for _, virtualNode := range virtualNodeList.Items {
        resourceMap[virtualNode.Name] = corev1.ResourceList{}
    }
// For each resource type the algorithm uses a multi-pass redistribution loop:
// 1. Divide the remaining quota evenly among eligible nodes (sorted by name for
//    determinism), assigning any integer remainder to the first nodes alphabetically.
// 2. Cap each node's share at its host allocatable capacity.
// 3. Remove nodes that have reached their host capacity.
// 4. If there is still unallocated quota (because some nodes were capped below their
//    even share), repeat from step 1 with the remaining quota and remaining nodes.
//
// The loop terminates when the quota is fully distributed or no eligible nodes remain.
func distributeQuotas(hostResourceMap, virtResourceMap map[string]corev1.ResourceList, quotas corev1.ResourceList) map[string]corev1.ResourceList {
    resourceMap := make(map[string]corev1.ResourceList, len(virtResourceMap))
    maps.Copy(resourceMap, virtResourceMap)

    // Distribute each resource type from the policy's hard quota
    for resourceName, totalQuantity := range quotas {
        // Use MilliValue for precise division, especially for resources like CPU,
        // which are often expressed in milli-units. Otherwise, use the standard Value().
        var totalValue int64
        if _, found := milliScaleResources[resourceName]; found {
            totalValue = totalQuantity.MilliValue()
        } else {
            totalValue = totalQuantity.Value()
        _, useMilli := milliScaleResources[resourceName]

        // eligible nodes for each distribution cycle
        var eligibleNodes []string

        hostCap := make(map[string]int64)

        // Populate the host node capacity map and the initial set of eligible nodes
        for vn := range virtResourceMap {
            hostNodeResources := hostResourceMap[vn]
            if hostNodeResources == nil {
                continue
            }

            resourceQuantity, found := hostNodeResources[resourceName]
            if !found {
                // skip the node if the resource does not exist on the host node
                continue
            }

            hostCap[vn] = resourceQuantity.Value()
            if useMilli {
                hostCap[vn] = resourceQuantity.MilliValue()
            }

            eligibleNodes = append(eligibleNodes, vn)
        }

        // Calculate the base quantity of the resource to be allocated per node,
        // and the remainder that needs to be distributed among the nodes.
        //
        // For example, if totalValue is 2000 (e.g., 2 CPU) and there are 3 nodes:
        // - quantityPerNode would be 666 (2000 / 3)
        // - remainder would be 2 (2000 % 3)
        // The first two nodes would get 667 (666 + 1), and the last one would get 666.
        quantityPerNode := totalValue / numNodes
        remainder := totalValue % numNodes
        sort.Strings(eligibleNodes)

        // Iterate through the sorted virtual nodes to distribute the resource.
        for _, virtualNode := range virtualNodeList.Items {
            nodeQuantity := quantityPerNode
            if remainder > 0 {
                nodeQuantity++
                remainder--
        totalValue := totalQuantity.Value()
        if useMilli {
            totalValue = totalQuantity.MilliValue()
        }

        // Start of the distribution cycle. Each cycle distributes the quota resource
        // evenly between nodes; no node can exceed the corresponding host node's capacity.
        for totalValue > 0 && len(eligibleNodes) > 0 {
            nodeNum := int64(len(eligibleNodes))
            quantityPerNode := totalValue / nodeNum
            remainder := totalValue % nodeNum

            remainingNodes := []string{}

            for _, virtualNodeName := range eligibleNodes {
                nodeQuantity := quantityPerNode
                if remainder > 0 {
                    nodeQuantity++
                    remainder--
                }
                // We cap the quantity to the hostNode capacity
                nodeQuantity = min(nodeQuantity, hostCap[virtualNodeName])

                if nodeQuantity > 0 {
                    existing := resourceMap[virtualNodeName][resourceName]
                    if useMilli {
                        resourceMap[virtualNodeName][resourceName] = *resource.NewMilliQuantity(existing.MilliValue()+nodeQuantity, totalQuantity.Format)
                    } else {
                        resourceMap[virtualNodeName][resourceName] = *resource.NewQuantity(existing.Value()+nodeQuantity, totalQuantity.Format)
                    }
                }

                totalValue -= nodeQuantity
                hostCap[virtualNodeName] -= nodeQuantity

                if hostCap[virtualNodeName] > 0 {
                    remainingNodes = append(remainingNodes, virtualNodeName)
                }
            }

            if _, found := milliScaleResources[resourceName]; found {
                resourceMap[virtualNode.Name][resourceName] = *resource.NewMilliQuantity(nodeQuantity, totalQuantity.Format)
            } else {
                resourceMap[virtualNode.Name][resourceName] = *resource.NewQuantity(nodeQuantity, totalQuantity.Format)
            }
            eligibleNodes = remainingNodes
        }
    }

    return resourceMap, nil
    return resourceMap
}

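To make the multi-pass loop concrete, here is a self-contained sketch of the same capped redistribution over plain integers (hypothetical names; the real function works on resource.Quantity values). With a quota of 6 and host capacities {a: 8, b: 2}: pass one hands out 3 each, b is capped at 2 and drops out, and pass two gives the leftover 1 to a, ending with a=4 and b=2, the same result as the "capping at host capacity with redistribution" test case below.

package main

import (
    "fmt"
    "sort"
)

// distribute is a simplified model of the capped, multi-pass split above.
func distribute(total int64, hostCap map[string]int64) map[string]int64 {
    out := make(map[string]int64)

    eligible := make([]string, 0, len(hostCap))
    for n := range hostCap {
        eligible = append(eligible, n)
    }
    sort.Strings(eligible) // deterministic remainder assignment

    for total > 0 && len(eligible) > 0 {
        per := total / int64(len(eligible))
        rem := total % int64(len(eligible))

        var remaining []string

        for _, n := range eligible {
            q := per
            if rem > 0 {
                q++
                rem--
            }
            if q > hostCap[n] {
                q = hostCap[n] // cap at the node's host capacity
            }

            out[n] += q
            total -= q
            hostCap[n] -= q

            if hostCap[n] > 0 {
                remaining = append(remaining, n)
            }
        }

        eligible = remaining
    }

    return out
}

func main() {
    fmt.Println(distribute(6, map[string]int64{"a": 8, "b": 2})) // map[a:4 b:2]
}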
@@ -1,19 +1,13 @@
package provider

import (
    "context"
    "testing"

    "github.com/go-logr/zapr"
    "github.com/stretchr/testify/assert"
    "go.uber.org/zap"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func Test_distributeQuotas(t *testing.T) {
@@ -21,39 +15,56 @@ func Test_distributeQuotas(t *testing.T) {
    err := corev1.AddToScheme(scheme)
    assert.NoError(t, err)

    node1 := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
    node2 := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-2"}}
    node3 := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-3"}}
    // Large allocatable so capping doesn't interfere with basic distribution tests.
    largeAllocatable := corev1.ResourceList{
        corev1.ResourceCPU:    resource.MustParse("100"),
        corev1.ResourceMemory: resource.MustParse("100Gi"),
        corev1.ResourcePods:   resource.MustParse("1000"),
    }

    tests := []struct {
        name         string
        virtualNodes []client.Object
        quotas       corev1.ResourceList
        want         map[string]corev1.ResourceList
        wantErr      bool
        name            string
        virtResourceMap map[string]corev1.ResourceList
        hostResourceMap map[string]corev1.ResourceList
        quotas          corev1.ResourceList
        want            map[string]corev1.ResourceList
    }{
        {
            name:         "no virtual nodes",
            virtualNodes: []client.Object{},
            name:            "no virtual nodes",
            virtResourceMap: map[string]corev1.ResourceList{},
            quotas: corev1.ResourceList{
                corev1.ResourceCPU: resource.MustParse("2"),
            },
            want:    map[string]corev1.ResourceList{},
            wantErr: false,
            want: map[string]corev1.ResourceList{},
        },
        {
            name:         "no quotas",
            virtualNodes: []client.Object{node1, node2},
            quotas:       corev1.ResourceList{},
            name: "no quotas",
            virtResourceMap: map[string]corev1.ResourceList{
                "node-1": {},
                "node-2": {},
            },
            hostResourceMap: map[string]corev1.ResourceList{
                "node-1": largeAllocatable,
                "node-2": largeAllocatable,
            },
            quotas: corev1.ResourceList{},
            want: map[string]corev1.ResourceList{
                "node-1": {},
                "node-2": {},
            },
            wantErr: false,
        },
        {
            name:         "even distribution of cpu and memory",
            virtualNodes: []client.Object{node1, node2},
            name: "fewer virtual nodes than host nodes",
            virtResourceMap: map[string]corev1.ResourceList{
                "node-1": {},
                "node-2": {},
            },
            hostResourceMap: map[string]corev1.ResourceList{
                "node-1": largeAllocatable,
                "node-2": largeAllocatable,
                "node-3": largeAllocatable,
                "node-4": largeAllocatable,
            },
            quotas: corev1.ResourceList{
                corev1.ResourceCPU:    resource.MustParse("2"),
                corev1.ResourceMemory: resource.MustParse("4Gi"),
@@ -68,65 +79,203 @@ func Test_distributeQuotas(t *testing.T) {
                    corev1.ResourceMemory: resource.MustParse("2Gi"),
                },
            },
            wantErr: false,
        },
        {
            name:         "uneven distribution with remainder",
            virtualNodes: []client.Object{node1, node2, node3},
            name: "even distribution of cpu and memory",
            virtResourceMap: map[string]corev1.ResourceList{
                "node-1": {},
                "node-2": {},
            },
            hostResourceMap: map[string]corev1.ResourceList{
                "node-1": largeAllocatable,
                "node-2": largeAllocatable,
            },
            quotas: corev1.ResourceList{
                corev1.ResourceCPU: resource.MustParse("2"), // 2000m / 3 = 666m with 2m remainder
                corev1.ResourceCPU:    resource.MustParse("2"),
                corev1.ResourceMemory: resource.MustParse("4Gi"),
            },
            want: map[string]corev1.ResourceList{
                "node-1": {
                    corev1.ResourceCPU:    resource.MustParse("1"),
                    corev1.ResourceMemory: resource.MustParse("2Gi"),
                },
                "node-2": {
                    corev1.ResourceCPU:    resource.MustParse("1"),
                    corev1.ResourceMemory: resource.MustParse("2Gi"),
                },
            },
        },
        {
            name: "uneven distribution with remainder",
            virtResourceMap: map[string]corev1.ResourceList{
                "node-1": {},
                "node-2": {},
                "node-3": {},
            },
            hostResourceMap: map[string]corev1.ResourceList{
                "node-1": largeAllocatable,
                "node-2": largeAllocatable,
                "node-3": largeAllocatable,
            },
            quotas: corev1.ResourceList{
                corev1.ResourceCPU: resource.MustParse("2"),
            },
            want: map[string]corev1.ResourceList{
                "node-1": {corev1.ResourceCPU: resource.MustParse("667m")},
                "node-2": {corev1.ResourceCPU: resource.MustParse("667m")},
                "node-3": {corev1.ResourceCPU: resource.MustParse("666m")},
            },
            wantErr: false,
        },
        {
            name:         "distribution of number resources",
            virtualNodes: []client.Object{node1, node2, node3},
            name: "distribution of number resources",
            virtResourceMap: map[string]corev1.ResourceList{
                "node-1": {},
                "node-2": {},
                "node-3": {},
            },
            hostResourceMap: map[string]corev1.ResourceList{
                "node-1": largeAllocatable,
                "node-2": largeAllocatable,
                "node-3": largeAllocatable,
            },
            quotas: corev1.ResourceList{
                corev1.ResourceCPU:     resource.MustParse("2"),
                corev1.ResourcePods:    resource.MustParse("11"),
                corev1.ResourceSecrets: resource.MustParse("9"),
                "custom":               resource.MustParse("8"),
                corev1.ResourceCPU:  resource.MustParse("2"),
                corev1.ResourcePods: resource.MustParse("11"),
            },
            want: map[string]corev1.ResourceList{
                "node-1": {
                    corev1.ResourceCPU:     resource.MustParse("667m"),
                    corev1.ResourcePods:    resource.MustParse("4"),
                    corev1.ResourceSecrets: resource.MustParse("3"),
                    "custom":               resource.MustParse("3"),
                    corev1.ResourceCPU:  resource.MustParse("667m"),
                    corev1.ResourcePods: resource.MustParse("4"),
                },
                "node-2": {
                    corev1.ResourceCPU:     resource.MustParse("667m"),
                    corev1.ResourcePods:    resource.MustParse("4"),
                    corev1.ResourceSecrets: resource.MustParse("3"),
                    "custom":               resource.MustParse("3"),
                    corev1.ResourceCPU:  resource.MustParse("667m"),
                    corev1.ResourcePods: resource.MustParse("4"),
                },
                "node-3": {
                    corev1.ResourceCPU:     resource.MustParse("666m"),
                    corev1.ResourcePods:    resource.MustParse("3"),
                    corev1.ResourceSecrets: resource.MustParse("3"),
                    "custom":               resource.MustParse("2"),
                    corev1.ResourceCPU:  resource.MustParse("666m"),
                    corev1.ResourcePods: resource.MustParse("3"),
                },
            },
            wantErr: false,
        },
        {
            name: "extended resource distributed only to nodes that have it",
            virtResourceMap: map[string]corev1.ResourceList{
                "node-1": {},
                "node-2": {},
                "node-3": {},
            },
            hostResourceMap: map[string]corev1.ResourceList{
                "node-1": {
                    corev1.ResourceCPU: resource.MustParse("100"),
                    "nvidia.com/gpu":   resource.MustParse("2"),
                },
                "node-2": {
                    corev1.ResourceCPU: resource.MustParse("100"),
                },
                "node-3": {
                    corev1.ResourceCPU: resource.MustParse("100"),
                    "nvidia.com/gpu":   resource.MustParse("4"),
                },
            },
            quotas: corev1.ResourceList{
                corev1.ResourceCPU: resource.MustParse("3"),
                "nvidia.com/gpu":   resource.MustParse("4"),
            },
            want: map[string]corev1.ResourceList{
                "node-1": {
                    corev1.ResourceCPU: resource.MustParse("1"),
                    "nvidia.com/gpu":   resource.MustParse("2"),
                },
                "node-2": {
                    corev1.ResourceCPU: resource.MustParse("1"),
                },
                "node-3": {
                    corev1.ResourceCPU: resource.MustParse("1"),
                    "nvidia.com/gpu":   resource.MustParse("2"),
                },
            },
        },
        {
            name: "capping at host capacity with redistribution",
            virtResourceMap: map[string]corev1.ResourceList{
                "node-1": {},
                "node-2": {},
            },
            hostResourceMap: map[string]corev1.ResourceList{
                "node-1": {
                    corev1.ResourceCPU: resource.MustParse("8"),
                },
                "node-2": {
                    corev1.ResourceCPU: resource.MustParse("2"),
                },
            },
            quotas: corev1.ResourceList{
                corev1.ResourceCPU: resource.MustParse("6"),
            },
            // Even split would be 3 each, but node-2 only has 2 CPU.
            // node-2 gets capped at 2, the remaining 1 goes to node-1.
            want: map[string]corev1.ResourceList{
                "node-1": {corev1.ResourceCPU: resource.MustParse("4")},
                "node-2": {corev1.ResourceCPU: resource.MustParse("2")},
            },
        },
        {
            name: "gpu capping with uneven host capacity",
            virtResourceMap: map[string]corev1.ResourceList{
                "node-1": {},
                "node-2": {},
            },
            hostResourceMap: map[string]corev1.ResourceList{
                "node-1": {
                    "nvidia.com/gpu": resource.MustParse("6"),
                },
                "node-2": {
                    "nvidia.com/gpu": resource.MustParse("1"),
                },
            },
            quotas: corev1.ResourceList{
                "nvidia.com/gpu": resource.MustParse("4"),
            },
            // Even split would be 2 each, but node-2 only has 1 GPU.
            // node-2 gets capped at 1, the remaining 1 goes to node-1.
            want: map[string]corev1.ResourceList{
                "node-1": {"nvidia.com/gpu": resource.MustParse("3")},
                "node-2": {"nvidia.com/gpu": resource.MustParse("1")},
            },
        },
        {
            name: "quota exceeds total host capacity",
            virtResourceMap: map[string]corev1.ResourceList{
                "node-1": {},
                "node-2": {},
                "node-3": {},
            },
            hostResourceMap: map[string]corev1.ResourceList{
                "node-1": {
                    "nvidia.com/gpu": resource.MustParse("2"),
                },
                "node-2": {
                    "nvidia.com/gpu": resource.MustParse("1"),
                },
                "node-3": {
                    "nvidia.com/gpu": resource.MustParse("1"),
                },
            },
            quotas: corev1.ResourceList{
                "nvidia.com/gpu": resource.MustParse("10"),
            },
            // Total host capacity is 4, quota is 10. Each node gets its full capacity.
            want: map[string]corev1.ResourceList{
                "node-1": {"nvidia.com/gpu": resource.MustParse("2")},
                "node-2": {"nvidia.com/gpu": resource.MustParse("1")},
                "node-3": {"nvidia.com/gpu": resource.MustParse("1")},
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.virtualNodes...).Build()
            logger := zapr.NewLogger(zap.NewNop())

            got, gotErr := distributeQuotas(context.Background(), logger, fakeClient, tt.quotas)
            if tt.wantErr {
                assert.Error(t, gotErr)
            } else {
                assert.NoError(t, gotErr)
            }
            got := distributeQuotas(tt.hostResourceMap, tt.virtResourceMap, tt.quotas)

            assert.Equal(t, len(tt.want), len(got), "Number of nodes in result should match")


@@ -398,31 +398,67 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {

    logger = logger.WithValues("pod", hostPod.Name)

    // Schedule the host pod in the same host node of the virtual kubelet
    hostPod.Spec.NodeName = p.agentHostname
    // Clear the NodeName to allow scheduling, and set affinity to prefer scheduling the Pod on the same host node as the virtual kubelet,
    // unless the user has specified their own affinity, in which case the user's affinity is respected.

    hostPod.Spec.NodeName = ""

    if hostPod.Spec.Affinity == nil {
        hostPod.Spec.Affinity = &corev1.Affinity{
            NodeAffinity: &corev1.NodeAffinity{
                PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{{
                    Weight: 100,
                    Preference: corev1.NodeSelectorTerm{
                        MatchExpressions: []corev1.NodeSelectorRequirement{{
                            Key:      "kubernetes.io/hostname",
                            Operator: corev1.NodeSelectorOpIn,
                            Values:   []string{p.agentHostname},
                        }},
                    },
                }},
            },
        }
    }

    // The pod's own nodeSelector is ignored.
    // The final selector is determined by the cluster spec, but overridden by a policy if present.
    hostPod.Spec.NodeSelector = cluster.Spec.NodeSelector
    if cluster.Status.Policy != nil && len(cluster.Status.Policy.NodeSelector) > 0 {
        hostPod.Spec.NodeSelector = cluster.Status.Policy.NodeSelector
    }

    // setting the hostname for the pod if it's not set
    if virtualPod.Spec.Hostname == "" {
        hostPod.Spec.Hostname = k3kcontroller.SafeConcatName(virtualPod.Name)
    }

    // if the priorityClass for the virtual cluster is set then override the provided value
    // When a PriorityClass is set we will use the translated one in the HostCluster.
    // If the Cluster or a Policy defines a PriorityClass of the host we are going to use that one.
    // Note: the core-dns and local-path-provisioner pods are scheduled by k3s with the
    // 'system-cluster-critical' and 'system-node-critical' default priority classes.
    if !strings.HasPrefix(hostPod.Spec.PriorityClassName, "system-") {
        if hostPod.Spec.PriorityClassName != "" {
            tPriorityClassName := p.Translator.TranslateName("", hostPod.Spec.PriorityClassName)
            hostPod.Spec.PriorityClassName = tPriorityClassName
    //
    // TODO: we probably need to define a custom "intermediate" k3k-system-* priority
    if strings.HasPrefix(virtualPod.Spec.PriorityClassName, "system-") {
        hostPod.Spec.PriorityClassName = virtualPod.Spec.PriorityClassName
    } else {
        enforcedPriorityClassName := cluster.Spec.PriorityClass
        if cluster.Status.Policy != nil && cluster.Status.Policy.PriorityClass != nil {
            enforcedPriorityClassName = *cluster.Status.Policy.PriorityClass
        }

        if cluster.Spec.PriorityClass != "" {
            hostPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
        if enforcedPriorityClassName != "" {
            hostPod.Spec.PriorityClassName = enforcedPriorityClassName
        } else if virtualPod.Spec.PriorityClassName != "" {
            hostPod.Spec.PriorityClassName = p.Translator.TranslateName("", virtualPod.Spec.PriorityClassName)
            hostPod.Spec.Priority = nil
        }
    }

    // if the priority class is set we need to remove the priority
    if hostPod.Spec.PriorityClassName != "" {
        hostPod.Spec.Priority = nil
    }

    p.configurePodEnvs(hostPod, &virtualPod)

    // fieldpath annotations

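Condensing the priority-class branch above: the class that ends up on the host pod follows a fixed precedence. A hedged distillation (hypothetical helper; the real code also clears Spec.Priority whenever a class is set):

import "strings"

// resolvePriorityClass sketches the decision order in the hunk above:
// 1. system-* classes from the virtual pod pass through unchanged;
// 2. otherwise a policy-enforced class wins over the cluster spec's class;
// 3. otherwise the virtual pod's own class is used, translated to its host name.
func resolvePriorityClass(virtualPC, clusterPC string, policyPC *string, translate func(string) string) string {
    if strings.HasPrefix(virtualPC, "system-") {
        return virtualPC
    }

    enforced := clusterPC
    if policyPC != nil {
        enforced = *policyPC
    }

    if enforced != "" {
        return enforced
    }

    if virtualPC != "" {
        return translate(virtualPC)
    }

    return ""
}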
@@ -538,6 +538,12 @@ type ClusterStatus struct {
    // +optional
    PolicyName string `json:"policyName,omitempty"`

    // policy represents the status of the policy applied to this cluster.
    // This field is set by the VirtualClusterPolicy controller.
    //
    // +optional
    Policy *AppliedPolicy `json:"policy,omitempty"`

    // KubeletPort specifies the port used by k3k-kubelet in shared mode.
    //
    // +optional
@@ -561,6 +567,25 @@ type ClusterStatus struct {
    Phase ClusterPhase `json:"phase,omitempty"`
}

// AppliedPolicy defines the observed state of an applied policy.
type AppliedPolicy struct {
    // name is the name of the VirtualClusterPolicy currently applied to this cluster.
    //
    // +kubebuilder:validation:MinLength:=1
    // +required
    Name string `json:"name,omitempty"`

    // priorityClass is the priority class enforced by the active VirtualClusterPolicy.
    //
    // +optional
    PriorityClass *string `json:"priorityClass,omitempty"`

    // nodeSelector is a node selector enforced by the active VirtualClusterPolicy.
    //
    // +optional
    NodeSelector map[string]string `json:"nodeSelector,omitempty"`
}

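For reference, a hedged sketch of what the new status field might hold once a VirtualClusterPolicy applies to a cluster (all values hypothetical):

// Illustrative only: the shape of cluster.Status.Policy after a policy named
// "team-a" with an enforced priority class and node selector is applied.
pc := "k3k-high-priority"
cluster.Status.Policy = &v1beta1.AppliedPolicy{
    Name:          "team-a",
    PriorityClass: &pc,
    NodeSelector:  map[string]string{"pool": "shared"},
}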
// ClusterPhase is a high-level summary of the cluster's current lifecycle state.
|
||||
type ClusterPhase string
|
||||
|
||||
|
||||
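The pointer type on PriorityClass matters: nil means "no class enforced", while a non-nil empty string would mean "enforced to the empty class". A hypothetical consumer of the new field (everything but the v1beta1 types is illustrative):

import "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

// enforcedPriorityClass reports the class a VirtualClusterPolicy enforces
// on this cluster, if any. Status.Policy is only populated while a policy
// is applied, so both nil checks are required.
func enforcedPriorityClass(c *v1beta1.Cluster) (string, bool) {
	if c.Status.Policy == nil || c.Status.Policy.PriorityClass == nil {
		return "", false
	}

	return *c.Status.Policy.PriorityClass, true
}
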
@@ -25,6 +25,33 @@ func (in *Addon) DeepCopy() *Addon {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AppliedPolicy) DeepCopyInto(out *AppliedPolicy) {
*out = *in
if in.PriorityClass != nil {
in, out := &in.PriorityClass, &out.PriorityClass
*out = new(string)
**out = **in
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedPolicy.
func (in *AppliedPolicy) DeepCopy() *AppliedPolicy {
if in == nil {
return nil
}
out := new(AppliedPolicy)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Cluster) DeepCopyInto(out *Cluster) {
*out = *in
@@ -200,6 +227,11 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Policy != nil {
in, out := &in.Policy, &out.Policy
*out = new(AppliedPolicy)
(*in).DeepCopyInto(*out)
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))

@@ -193,7 +193,9 @@ func (p *StatefulSetReconciler) getETCDTLS(ctx context.Context, cluster *v1beta1
return true
}, func() error {
var err error

b, err = bootstrap.DecodedBootstrap(token, endpoint)

return err
}); err != nil {
return nil, err

@@ -2,13 +2,17 @@ package policy

import (
"context"
"errors"
"fmt"

"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"

v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
ctrl "sigs.k8s.io/controller-runtime"

"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -52,15 +56,36 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)
}

for _, ns := range namespaces.Items {
selector := labels.NewSelector()
currentPolicyName := ns.Labels[PolicyNameLabelKey]

if req, err := labels.NewRequirement(ManagedByLabelKey, selection.Equals, []string{VirtualPolicyControllerName}); err == nil {
selector = selector.Add(*req)
}
// This will match all the resources managed by the K3k Policy controller
// that have the app.kubernetes.io/managed-by=k3k-policy-controller label
selector := labels.SelectorFromSet(labels.Set{
ManagedByLabelKey: VirtualPolicyControllerName,
})

// if the namespace is bound to a policy -> cleanup resources of other policies
if ns.Labels[PolicyNameLabelKey] != "" {
requirement, err := labels.NewRequirement(PolicyNameLabelKey, selection.NotEquals, []string{ns.Labels[PolicyNameLabelKey]})
// If the namespace is not bound to any policy, or if the policy it was bound to no longer exists,
// we need to clear policy-related fields on its Cluster objects.
if currentPolicyName == "" {
if err := c.clearPolicyFieldsForClustersInNamespace(ctx, ns.Name); err != nil {
log.Error(err, "error clearing policy fields for clusters in unbound namespace", "namespace", ns.Name)
}
} else {
var policy v1beta1.VirtualClusterPolicy
if err := c.Client.Get(ctx, types.NamespacedName{Name: currentPolicyName}, &policy); err != nil {
if apierrors.IsNotFound(err) {
if err := c.clearPolicyFieldsForClustersInNamespace(ctx, ns.Name); err != nil {
log.Error(err, "error clearing policy fields for clusters in namespace with non-existent policy", "namespace", ns.Name, "policy", currentPolicyName)
}
} else {
log.Error(err, "error getting policy for namespace", "namespace", ns.Name, "policy", currentPolicyName)
}
}

// if the namespace is bound to a policy -> cleanup resources of other policies
requirement, err := labels.NewRequirement(
PolicyNameLabelKey, selection.NotEquals, []string{currentPolicyName},
)

// log the error but continue cleaning up the other namespaces
if err != nil {
@@ -90,3 +115,30 @@ func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context)

return nil
}

// clearPolicyFieldsForClustersInNamespace sets the policy status on Cluster objects in the given namespace to nil.
func (c *VirtualClusterPolicyReconciler) clearPolicyFieldsForClustersInNamespace(ctx context.Context, namespace string) error {
log := ctrl.LoggerFrom(ctx)

var clusters v1beta1.ClusterList
if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace)); err != nil {
return fmt.Errorf("failed listing clusters in namespace %s: %w", namespace, err)
}

var updateErrs []error

for i := range clusters.Items {
cluster := clusters.Items[i]
if cluster.Status.Policy != nil {
log.V(1).Info("Clearing policy status for Cluster", "cluster", cluster.Name, "namespace", namespace)
cluster.Status.Policy = nil

if updateErr := c.Client.Status().Update(ctx, &cluster); updateErr != nil {
updateErr = fmt.Errorf("failed updating Status for Cluster %s: %w", cluster.Name, updateErr)
updateErrs = append(updateErrs, updateErr)
}
}
}

return errors.Join(updateErrs...)
}

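The replacement selector logic above composes a base set-selector with a NotEquals requirement. A standalone sketch of the same construction; the label keys here are stand-ins for the package constants ManagedByLabelKey and PolicyNameLabelKey:

import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

// policyCleanupSelector matches everything managed by the policy controller
// except resources belonging to the namespace's current policy.
func policyCleanupSelector(currentPolicy string) (labels.Selector, error) {
	selector := labels.SelectorFromSet(labels.Set{
		"app.kubernetes.io/managed-by": "k3k-policy-controller",
	})

	req, err := labels.NewRequirement("policy.k3k.io/policy-name", selection.NotEquals, []string{currentPolicy})
	if err != nil {
		return nil, err
	}

	return selector.Add(*req), nil
}
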
@@ -470,16 +470,21 @@ func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context,
var clusterUpdateErrs []error

for _, cluster := range clusters.Items {
orig := cluster.DeepCopy()
origStatus := cluster.Status.DeepCopy()

cluster.Spec.PriorityClass = policy.Spec.DefaultPriorityClass
cluster.Spec.NodeSelector = policy.Spec.DefaultNodeSelector
cluster.Status.Policy = &v1beta1.AppliedPolicy{
Name: policy.Name,
PriorityClass: &policy.Spec.DefaultPriorityClass,
NodeSelector: policy.Spec.DefaultNodeSelector,
}

if !reflect.DeepEqual(orig, cluster) {
if !reflect.DeepEqual(origStatus, &cluster.Status) {
log.V(1).Info("Updating Cluster", "cluster", cluster.Name, "namespace", namespace.Name)

// continue updating also the other clusters even if an error occurred
clusterUpdateErrs = append(clusterUpdateErrs, c.Client.Update(ctx, &cluster))
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
clusterUpdateErrs = append(clusterUpdateErrs, err)
}
}
}

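The split introduced here reflects how the status subresource works: once /status is enabled on the Cluster CRD, a plain Update persists spec changes but silently drops status changes, and Status().Update does the reverse. Comparing a DeepCopy of only the old status also avoids a no-op write. An illustrative shape of that pattern, not the full reconcile loop:

// spec fields travel through Update; status fields through Status().Update
origStatus := cluster.Status.DeepCopy()
cluster.Status.Policy = &v1beta1.AppliedPolicy{Name: policy.Name}

if !reflect.DeepEqual(origStatus, &cluster.Status) {
	if err := c.Client.Status().Update(ctx, &cluster); err != nil {
		clusterUpdateErrs = append(clusterUpdateErrs, err)
	}
}
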
@@ -16,8 +16,8 @@ echo "Building k3k... [cli os/arch: $(go env GOOS)/$(go env GOARCH)]"
echo "Current TAG: ${VERSION} "

export CGO_ENABLED=0
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3k
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3k-kubelet ./k3k-kubelet
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]+"${build_args[@]}"}" -o bin/k3k
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]+"${build_args[@]}"}" -o bin/k3k-kubelet ./k3k-kubelet

# build the cli for the local OS and ARCH
go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3kcli ./cli
go build -ldflags="${LDFLAGS}" "${build_args[@]+"${build_args[@]}"}" -o bin/k3kcli ./cli

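The "${build_args[@]+"${build_args[@]}"}" form this hunk introduces is the usual workaround for scripts running under "set -u": when the build_args array is unset or empty it expands to nothing at all, whereas the plain "${build_args[@]}" expansion aborts with an "unbound variable" error on bash versions before 4.4.
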
@@ -14,7 +14,6 @@ go run sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_TOOLS_VERSIO

# add the 'helm.sh/resource-policy: keep' annotation to the CRDs
for f in ./charts/k3k/templates/crds/*.yaml; do
sed -i '0,/^[[:space:]]*annotations:/s/^[[:space:]]*annotations:/&\n helm.sh\/resource-policy: keep/' "$f"
echo "Validating $f"
yq . "$f" > /dev/null
echo "Annotating $f"
yq -c -i '.metadata.annotations["helm.sh/resource-policy"] = "keep"' "$f"
done

@@ -1,4 +1,4 @@
package k3k_test
package cli_test

import (
"bytes"
@@ -78,6 +78,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
Eventually(func() string {
stdout, stderr, err := K3kcli("cluster", "list", "-n", clusterNamespace)
Expect(err).To(Not(HaveOccurred()), string(stderr))

return stdout + stderr
}).
WithTimeout(time.Second * 5).
@@ -164,6 +165,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
})

var ns v1.Namespace

err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(ns.Name).To(Equal(namespaceName))
@@ -230,6 +232,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {

// Verify the cluster state was actually updated
var cluster v1beta1.Cluster

err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.Servers).To(Not(BeNil()))
@@ -263,6 +266,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {

// Verify the cluster state was actually updated
var cluster v1beta1.Cluster

err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.Version).To(Equal(k3sVersion))
@@ -295,6 +299,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {

// Verify the cluster version was NOT changed
var cluster v1beta1.Cluster

err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.Version).To(Equal(k3sVersion))
@@ -339,6 +344,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {

// Verify the cluster labels were actually updated
var cluster v1beta1.Cluster

err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Labels).To(HaveKeyWithValue("env", "test"))
@@ -372,6 +378,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {

// Verify the cluster annotations were actually updated
var cluster v1beta1.Cluster

err = k8sClient.Get(context.Background(), types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Annotations).To(HaveKeyWithValue("description", "test-cluster"))
55
tests/cli/common_test.go
Normal file
@@ -0,0 +1,55 @@
package cli_test

import (
"context"
"fmt"
"os"
"sync"

"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

func NewNamespace() *v1.Namespace {
GinkgoHelper()

namespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-", Labels: map[string]string{"e2e": "true"}}}
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))

return namespace
}

func DeleteNamespaces(names ...string) {
GinkgoHelper()

if _, found := os.LookupEnv("KEEP_NAMESPACES"); found {
By(fmt.Sprintf("Keeping namespace %v", names))
return
}

wg := sync.WaitGroup{}
wg.Add(len(names))

for _, name := range names {
go func() {
defer wg.Done()
defer GinkgoRecover()

By(fmt.Sprintf("Deleting namespace %s", name))

err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{
GracePeriodSeconds: ptr.To[int64](0),
})
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
}()
}

wg.Wait()
}
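A hypothetical spec showing how these helpers compose; only NewNamespace and DeleteNamespaces come from this file, the rest is standard Ginkgo/Gomega:

var _ = When("exercising a throwaway namespace", func() {
	It("creates a labeled namespace and tears it down", func() {
		ns := NewNamespace()
		DeferCleanup(func() { DeleteNamespaces(ns.Name) })

		// NewNamespace always stamps the e2e=true label used for cleanup
		Expect(ns.Labels).To(HaveKeyWithValue("e2e", "true"))
	})
})
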
56
tests/cli/k8s_restclientgetter_test.go
Normal file
@@ -0,0 +1,56 @@
package cli_test

import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"

memory "k8s.io/client-go/discovery/cached"
)

type RESTClientGetter struct {
clientconfig clientcmd.ClientConfig
restConfig *rest.Config
discoveryClient discovery.CachedDiscoveryInterface
}

func NewRESTClientGetter(kubeconfig []byte) (*RESTClientGetter, error) {
clientconfig, err := clientcmd.NewClientConfigFromBytes([]byte(kubeconfig))
if err != nil {
return nil, err
}

restConfig, err := clientconfig.ClientConfig()
if err != nil {
return nil, err
}

dc, err := discovery.NewDiscoveryClientForConfig(restConfig)
if err != nil {
return nil, err
}

return &RESTClientGetter{
clientconfig: clientconfig,
restConfig: restConfig,
discoveryClient: memory.NewMemCacheClient(dc),
}, nil
}

func (r *RESTClientGetter) ToRESTConfig() (*rest.Config, error) {
return r.restConfig, nil
}

func (r *RESTClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
return r.discoveryClient, nil
}

func (r *RESTClientGetter) ToRESTMapper() (meta.RESTMapper, error) {
return restmapper.NewDeferredDiscoveryRESTMapper(r.discoveryClient), nil
}

func (r *RESTClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig {
return r.clientconfig
}
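Helm's action.Configuration only needs the four methods implemented above (the genericclioptions.RESTClientGetter surface), which is why this small type is enough to drive chart installs from the suite. Usage mirroring the setup that follows in the suite file:

kubeconfig, err := os.ReadFile(kubeconfigPath)
Expect(err).To(Not(HaveOccurred()))

getter, err := NewRESTClientGetter(kubeconfig)
Expect(err).To(Not(HaveOccurred()))

// route helm's debug log lines into the Ginkgo writer
cfg := new(action.Configuration)
err = cfg.Init(getter, "k3k-system", os.Getenv("HELM_DRIVER"), GinkgoWriter.Printf)
Expect(err).To(Not(HaveOccurred()))
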
234
tests/cli/tests_suite_test.go
Normal file
@@ -0,0 +1,234 @@
package cli_test

import (
"context"
"io"
"maps"
"os"
"path"
"strings"
"testing"
"time"

"github.com/go-logr/zapr"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/k3s"
"go.uber.org/zap"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart/loader"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"

clientgoscheme "k8s.io/client-go/kubernetes/scheme"

"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

const (
k3kNamespace = "k3k-system"

k3sVersion = "v1.35.2-k3s1"
k3sOldVersion = "v1.35.0-k3s1"
)

func TestTests(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Tests Suite")
}

var (
k3sContainer *k3s.K3sContainer
restcfg *rest.Config
k8s *kubernetes.Clientset
k8sClient client.Client
kubeconfigPath string
helmActionConfig *action.Configuration
)

var _ = BeforeSuite(func() {
ctx := context.Background()

_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")

if dockerInstallEnabled {
repo := os.Getenv("REPO")
if repo == "" {
repo = "rancher"
}

installK3SDocker(ctx, repo+"/k3k", repo+"/k3k-kubelet")
initKubernetesClient()
installK3kChart(repo+"/k3k", repo+"/k3k-kubelet")
} else {
initKubernetesClient()
}
})

func initKubernetesClient() {
var (
err error
kubeconfig []byte
)

logger, err := zap.NewDevelopment()
Expect(err).NotTo(HaveOccurred())

log.SetLogger(zapr.NewLogger(logger))

kubeconfigPath := os.Getenv("KUBECONFIG")
Expect(kubeconfigPath).To(Not(BeEmpty()))

kubeconfig, err = os.ReadFile(kubeconfigPath)
Expect(err).To(Not(HaveOccurred()))

restcfg, err = clientcmd.RESTConfigFromKubeConfig(kubeconfig)
Expect(err).To(Not(HaveOccurred()))

k8s, err = kubernetes.NewForConfig(restcfg)
Expect(err).To(Not(HaveOccurred()))

scheme := buildScheme()
k8sClient, err = client.New(restcfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
}

func buildScheme() *runtime.Scheme {
scheme := runtime.NewScheme()

err := clientgoscheme.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())

return scheme
}

func installK3SDocker(ctx context.Context, controllerImage, kubeletImage string) {
var (
err error
kubeconfig []byte
)

k3sHostVersion := os.Getenv("K3S_HOST_VERSION")
if k3sHostVersion == "" {
k3sHostVersion = k3sVersion
}

k3sHostVersion = strings.ReplaceAll(k3sHostVersion, "+", "-")

k3sContainer, err = k3s.Run(ctx, "rancher/k3s:"+k3sHostVersion)
Expect(err).To(Not(HaveOccurred()))

containerIP, err := k3sContainer.ContainerIP(ctx)
Expect(err).To(Not(HaveOccurred()))

GinkgoWriter.Println("K3s containerIP: " + containerIP)

kubeconfig, err = k3sContainer.GetKubeConfig(context.Background())
Expect(err).To(Not(HaveOccurred()))

tmpFile, err := os.CreateTemp("", "kubeconfig-")
Expect(err).To(Not(HaveOccurred()))

_, err = tmpFile.Write(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
Expect(tmpFile.Close()).To(Succeed())
kubeconfigPath = tmpFile.Name()

err = k3sContainer.LoadImages(ctx, controllerImage+":dev", kubeletImage+":dev")
Expect(err).To(Not(HaveOccurred()))
DeferCleanup(os.Remove, kubeconfigPath)

Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
GinkgoWriter.Printf("KUBECONFIG set to: %s\n", kubeconfigPath)
}

func installK3kChart(controllerImage, kubeletImage string) {
pwd, err := os.Getwd()
Expect(err).To(Not(HaveOccurred()))

k3kChart, err := loader.Load(path.Join(pwd, "../../charts/k3k"))
Expect(err).To(Not(HaveOccurred()))

helmActionConfig = new(action.Configuration)

kubeconfig, err := os.ReadFile(kubeconfigPath)
Expect(err).To(Not(HaveOccurred()))

restClientGetter, err := NewRESTClientGetter(kubeconfig)
Expect(err).To(Not(HaveOccurred()))

err = helmActionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
GinkgoWriter.Printf("[Helm] "+format+"\n", v...)
})
Expect(err).To(Not(HaveOccurred()))

iCli := action.NewInstall(helmActionConfig)
iCli.ReleaseName = "k3k"
iCli.Namespace = k3kNamespace
iCli.CreateNamespace = true
iCli.Timeout = time.Minute
iCli.Wait = true

controllerMap, _ := k3kChart.Values["controller"].(map[string]any)

extraEnvArray, _ := controllerMap["extraEnv"].([]map[string]any)
extraEnvArray = append(extraEnvArray, map[string]any{
"name": "DEBUG",
"value": "true",
})
controllerMap["extraEnv"] = extraEnvArray

imageMap, _ := controllerMap["image"].(map[string]any)
maps.Copy(imageMap, map[string]any{
"repository": controllerImage,
"tag": "dev",
"pullPolicy": "IfNotPresent",
})

agentMap, _ := k3kChart.Values["agent"].(map[string]any)
sharedAgentMap, _ := agentMap["shared"].(map[string]any)
sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
maps.Copy(sharedAgentImageMap, map[string]any{
"repository": kubeletImage,
"tag": "dev",
})

release, err := iCli.Run(k3kChart, k3kChart.Values)
Expect(err).To(Not(HaveOccurred()))

GinkgoWriter.Printf("Helm release '%s' installed in '%s' namespace\n", release.Name, release.Namespace)
}

var _ = AfterSuite(func() {
ctx := context.Background()

if k3sContainer != nil {
// dump k3s logs
k3sLogs, err := k3sContainer.Logs(ctx)
Expect(err).To(Not(HaveOccurred()))
writeLogs("k3s.log", k3sLogs)

testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
}
})

func writeLogs(filename string, logs io.ReadCloser) {
logsStr, err := io.ReadAll(logs)
Expect(err).To(Not(HaveOccurred()))

tempfile := path.Join(os.TempDir(), filename)
err = os.WriteFile(tempfile, []byte(logsStr), 0o644)
Expect(err).To(Not(HaveOccurred()))

GinkgoWriter.Println("logs written to: " + tempfile)

_ = logs.Close()
}
341
tests/e2e/cluster_app_test.go
Normal file
@@ -0,0 +1,341 @@
package k3k_test

import (
"context"
"time"

"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/utils/ptr"

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/rancher/k3k/k3k-kubelet/translate"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
var (
virtualCluster *VirtualCluster
translator *translate.ToHostTranslator
)

BeforeAll(func() {
virtualCluster = NewVirtualCluster()
translator = translate.NewHostTranslator(virtualCluster.Cluster)

DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
})

When("creating a Deployment with a PVC", func() {
var (
deployment *appsv1.Deployment
pvc *v1.PersistentVolumeClaim

namespace = "default"
labels = map[string]string{
"app": "k3k-deployment-test-app",
}
)

BeforeAll(func() {
var err error

ctx := context.Background()

By("Creating the PVC")

pvc = &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "k3k-test-app-",
Namespace: namespace,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
}

pvc, err = virtualCluster.Client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))

By("Creating the Deployment")

deployment = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "k3k-test-app-",
Namespace: namespace,
},
Spec: appsv1.DeploymentSpec{
Replicas: ptr.To[int32](3),
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "nginx",
VolumeMounts: []v1.VolumeMount{{
Name: "data-volume",
MountPath: "/data",
}},
},
},
Volumes: []v1.Volume{{
Name: "data-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
}},
},
},
},
}

deployment, err = virtualCluster.Client.AppsV1().Deployments(namespace).Create(ctx, deployment, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})

It("should bind the PVC in the virtual cluster", func() {
ctx := context.Background()

Eventually(func(g Gomega) {
virtualPVC, err := virtualCluster.Client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(virtualPVC.Status.Phase).To(Equal(v1.ClaimBound))
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})

It("should bind the PVC in the host cluster", func() {
ctx := context.Background()

Eventually(func(g Gomega) {
hostPVCName := translator.NamespacedName(pvc)

hostPVC, err := k8s.CoreV1().PersistentVolumeClaims(hostPVCName.Namespace).Get(ctx, hostPVCName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(hostPVC.Status.Phase).To(Equal(v1.ClaimBound))
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})

It("should have the Pods running in the virtual cluster", func() {
ctx := context.Background()

Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(deployment.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}

pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pods.Items).Should(HaveLen(int(*deployment.Spec.Replicas)))

for _, pod := range pods.Items {
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})

It("should have the Pods running in the host cluster", func() {
ctx := context.Background()

Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(deployment.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}

pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pods.Items).Should(HaveLen(int(*deployment.Spec.Replicas)))

for _, pod := range pods.Items {
hostPodName := translator.NamespacedName(&pod)

pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})

When("creating a StatefulSet with a PVC", func() {
var (
statefulSet *appsv1.StatefulSet

namespace = "default"
labels = map[string]string{
"app": "k3k-sts-test-app",
}
)

BeforeAll(func() {
var err error

ctx := context.Background()

namespace := "default"

By("Creating the StatefulSet")

statefulSet = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "k3k-sts-test-app-",
Namespace: namespace,
},
Spec: appsv1.StatefulSetSpec{
Replicas: ptr.To[int32](3),
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "nginx",
VolumeMounts: []v1.VolumeMount{{
Name: "www",
MountPath: "/usr/share/nginx/html",
}},
},
},
},
},
VolumeClaimTemplates: []v1.PersistentVolumeClaim{{
ObjectMeta: metav1.ObjectMeta{
Name: "www",
Labels: labels,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
}},
},
}

statefulSet, err = virtualCluster.Client.AppsV1().StatefulSets(namespace).Create(ctx, statefulSet, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})

It("should bind the PVCs in the virtual cluster", func() {
ctx := context.Background()

Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}

pvcs, err := virtualCluster.Client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())

for _, pvc := range pvcs.Items {
g.Expect(pvc.Status.Phase).To(Equal(v1.ClaimBound))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})

It("should bind the PVCs in the host cluster", func() {
ctx := context.Background()

Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}

pvcs, err := virtualCluster.Client.CoreV1().PersistentVolumeClaims(statefulSet.Namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())

for _, pvc := range pvcs.Items {
hostPVCName := translator.NamespacedName(&pvc)

hostPVC, err := k8s.CoreV1().PersistentVolumeClaims(hostPVCName.Namespace).Get(ctx, hostPVCName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(hostPVC.Status.Phase).To(Equal(v1.ClaimBound))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})

It("should have the Pods running in the virtual cluster", func() {
ctx := context.Background()

Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}

pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pods.Items).Should(HaveLen(int(*statefulSet.Spec.Replicas)))

for _, pod := range pods.Items {
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})

It("should have the Pods running in the host cluster", func() {
ctx := context.Background()

Eventually(func(g Gomega) {
labelSelector := metav1.FormatLabelSelector(statefulSet.Spec.Selector)
listOpts := metav1.ListOptions{LabelSelector: labelSelector}

pods, err := virtualCluster.Client.CoreV1().Pods(namespace).List(ctx, listOpts)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pods.Items).Should(HaveLen(int(*statefulSet.Spec.Replicas)))

for _, pod := range pods.Items {
hostPodName := translator.NamespacedName(&pod)

pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
}
}).
WithPolling(time.Second * 3).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})
})
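These host-side assertions all pivot on translate.ToHostTranslator: a virtual object's namespace/name pair is mapped to a single name inside the cluster's host namespace, so every lookup follows one pattern. The exact naming scheme is internal to the translator; this only shows the shape it produces:

// hostName.Namespace is the virtual cluster's host namespace,
// hostName.Name the translated, virtual-unique object name
hostName := translator.NamespacedName(pod)

hostPod, err := k8s.CoreV1().Pods(hostName.Namespace).Get(ctx, hostName.Name, metav1.GetOptions{})
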
@@ -36,18 +36,24 @@ var _ = When("a cluster with custom certificates is installed with individual ce
}

for _, certName := range certList {
var cert, key []byte
var err error
var (
cert, key []byte
err error
)

filePathPrefix := ""
certfile := certName

if strings.HasPrefix(certName, "etcd") {
filePathPrefix = "etcd/"
certfile = strings.TrimPrefix(certName, "etcd-")
}

if !strings.Contains(certName, "service") {
cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt")
Expect(err).To(Not(HaveOccurred()))
}

key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key")
Expect(err).To(Not(HaveOccurred()))

@@ -75,6 +75,7 @@ var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(per
Eventually(func() any {
serverPods := listServerPods(ctx, virtualCluster)
Expect(len(serverPods)).To(Equal(1))

return serverPods[0].DeletionTimestamp
}).
WithTimeout(time.Minute).
@@ -87,7 +88,9 @@ var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(per

Eventually(func() bool {
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()

var unknownAuthorityErr x509.UnknownAuthorityError

return errors.As(err, &unknownAuthorityErr)
}).
WithTimeout(time.Minute * 2).
@@ -99,6 +102,7 @@ var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(per
Eventually(func() error {
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()

return err
}).
WithTimeout(time.Minute * 2).

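The certificate-rotation check above depends on errors.As walking the wrapped error chain returned by the discovery client down to the crypto/x509 value. A runnable standalone version of the same test:

package main

import (
	"crypto/x509"
	"errors"
	"fmt"
)

// isUnknownAuthority reports whether err (or anything it wraps) is a
// certificate verification failure against an unknown CA.
func isUnknownAuthority(err error) bool {
	var uaErr x509.UnknownAuthorityError
	return errors.As(err, &uaErr)
}

func main() {
	err := fmt.Errorf("dialing apiserver: %w", x509.UnknownAuthorityError{})
	fmt.Println(isUnknownAuthority(err)) // true
}
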
@@ -20,16 +20,132 @@ import (
)

var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
var virtualCluster *VirtualCluster
var (
virtualCluster *VirtualCluster
translator *translate.ToHostTranslator
)

BeforeAll(func() {
virtualCluster = NewVirtualCluster()
translator = translate.NewHostTranslator(virtualCluster.Cluster)

DeferCleanup(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
})

When("creating a Pod without any Affinity", func() {
var pod *v1.Pod

BeforeAll(func() {
var err error

ctx := context.Background()

pod = &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "nginx-",
Namespace: "default",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "nginx",
Image: "nginx",
}},
},
}

pod, err = virtualCluster.Client.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})

It("should have the default Affinity", func() {
ctx := context.Background()

Eventually(func(g Gomega) {
hostPodName := translator.NamespacedName(pod)

hostPod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(hostPod.Spec.Affinity).To(Not(BeNil()))
g.Expect(hostPod.Spec.Affinity.NodeAffinity).To(Not(BeNil()))
g.Expect(hostPod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution).To(Not(BeNil()))

preferredScheduling := hostPod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution
g.Expect(preferredScheduling).To(Not(BeEmpty()))
g.Expect(preferredScheduling[0].Weight).To(Equal(int32(100)))
g.Expect(preferredScheduling[0].Preference.MatchExpressions).To(Not(BeEmpty()))
g.Expect(preferredScheduling[0].Preference.MatchExpressions[0].Key).To(Equal("kubernetes.io/hostname"))
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Succeed())
})
})

When("creating a Pod with an Affinity", func() {
var pod *v1.Pod

BeforeAll(func() {
var err error

ctx := context.Background()

pod = &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "nginx-",
Namespace: "default",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "nginx",
Image: "nginx",
}},
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{{
MatchExpressions: []v1.NodeSelectorRequirement{{
Key: "kubernetes.io/hostname",
Operator: v1.NodeSelectorOpNotIn,
Values: []string{"fake"},
}},
}},
},
},
},
},
}

pod, err = virtualCluster.Client.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})

It("should not have the default Affinity", func() {
ctx := context.Background()

Eventually(func(g Gomega) {
hostPodName := translator.NamespacedName(pod)

hostPod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(hostPod.Spec.Affinity).To(Not(BeNil()))
g.Expect(hostPod.Spec.Affinity.NodeAffinity).To(Not(BeNil()))
g.Expect(hostPod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution).To(Not(BeNil()))

requiredScheduling := hostPod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
g.Expect(requiredScheduling).To(Not(BeNil()))
g.Expect(requiredScheduling.NodeSelectorTerms).To(Not(BeEmpty()))
g.Expect(requiredScheduling.NodeSelectorTerms[0].MatchExpressions).To(Not(BeEmpty()))
g.Expect(requiredScheduling.NodeSelectorTerms[0].MatchExpressions[0].Key).To(Equal("kubernetes.io/hostname"))
g.Expect(requiredScheduling.NodeSelectorTerms[0].MatchExpressions[0].Values).To(ContainElement("fake"))
}).
WithPolling(time.Second).
WithTimeout(time.Minute).
Should(Succeed())
})
})

When("creating a Pod with an invalid configuration", func() {
var virtualPod *v1.Pod

@@ -89,6 +205,7 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
}

ctx := context.Background()

var err error

virtualPod, err = virtualCluster.Client.CoreV1().Pods(p.Namespace).Create(ctx, p, metav1.CreateOptions{})
@@ -110,6 +227,7 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
g.Expect(envVars).NotTo(BeEmpty())

var found bool

for _, envVar := range envVars {
if envVar.Name == "POD_NAME" {
found = true
@@ -117,9 +235,11 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
g.Expect(envVars[0].ValueFrom).NotTo(BeNil())
g.Expect(envVars[0].ValueFrom.FieldRef).NotTo(BeNil())
g.Expect(envVars[0].ValueFrom.FieldRef.FieldPath).To(Equal("metadata.name"))

break
}
}

g.Expect(found).To(BeTrue())

containerStatuses := pod.Status.ContainerStatuses
@@ -136,7 +256,6 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
By("Checking the container status of the Pod in the Host Cluster")

Eventually(func(g Gomega) {
translator := translate.NewHostTranslator(virtualCluster.Cluster)
hostPodName := translator.NamespacedName(virtualPod)

pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
@@ -148,15 +267,18 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
g.Expect(envVars).NotTo(BeEmpty())

var found bool

for _, envVar := range envVars {
if envVar.Name == "POD_NAME" {
found = true

g.Expect(envVar.ValueFrom).To(BeNil())
g.Expect(envVar.Value).To(Equal(virtualPod.Name))

break
}
}

g.Expect(found).To(BeTrue())

containerStatuses := pod.Status.ContainerStatuses
@@ -200,7 +322,6 @@ var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
By("Checking the status of the Pod in the Host Cluster")

Eventually(func(g Gomega) {
translator := translate.NewHostTranslator(virtualCluster.Cluster)
hostPodName := translator.NamespacedName(virtualPod)

hPod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
@@ -20,6 +20,7 @@ import (

var _ = When("a cluster with private registry configuration is used", Label("e2e"), Label(registryTestsLabel), func() {
var virtualCluster *VirtualCluster

BeforeEach(func() {
ctx := context.Background()

@@ -138,7 +139,9 @@ var _ = When("a cluster with private registry configuration is used", Label("e2e
}

By("Creating Alpine Pod and making sure it's failing to start")

var err error

alpinePod, err = virtualCluster.Client.CoreV1().Pods(alpinePod.Namespace).Create(ctx, alpinePod, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))

@@ -8,6 +8,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"

corev1 "k8s.io/api/core/v1"
schedv1 "k8s.io/api/scheduling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
@@ -99,6 +100,100 @@ var _ = When("a cluster's status is tracked", Label(e2eTestLabel), Label(statusT
WithPolling(time.Second * 5).
Should(Succeed())
})

It("created with field controlled from a policy", func() {
ctx := context.Background()

priorityClass := &schedv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pc-",
},
Value: 100,
}
Expect(k8sClient.Create(ctx, priorityClass)).To(Succeed())

clusterObj := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "status-cluster-",
Namespace: namespace.Name,
},
Spec: v1beta1.ClusterSpec{
PriorityClass: priorityClass.Name,
},
}
Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed())

DeferCleanup(func() {
Expect(k8sClient.Delete(ctx, priorityClass)).To(Succeed())
})

clusterKey := client.ObjectKeyFromObject(clusterObj)

// Check for the initial status to be set
Eventually(func(g Gomega) {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())

g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterProvisioning))

cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(Equal(metav1.ConditionFalse))
g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioning))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Second * 20).
Should(Succeed())

// Check for the status to be updated to Ready
Eventually(func(g Gomega) {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())

g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterReady))
g.Expect(clusterObj.Status.Policy).To(Not(BeNil()))
g.Expect(clusterObj.Status.Policy.Name).To(Equal(vcp.Name))

cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(Equal(metav1.ConditionTrue))
g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioned))
}).
WithTimeout(time.Minute * 3).
WithPolling(time.Second * 5).
Should(Succeed())

// update policy

priorityClassVCP := &schedv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pc-",
},
Value: 100,
}
Expect(k8sClient.Create(ctx, priorityClassVCP)).To(Succeed())

DeferCleanup(func() {
Expect(k8sClient.Delete(ctx, priorityClassVCP)).To(Succeed())
})

vcp.Spec.DefaultPriorityClass = priorityClassVCP.Name
Expect(k8sClient.Update(ctx, vcp)).To(Succeed())

// Check for the status to be updated to Ready
Eventually(func(g Gomega) {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())

g.Expect(clusterObj.Status.Policy).To(Not(BeNil()))
g.Expect(clusterObj.Status.Policy.PriorityClass).To(Not(BeNil()))
g.Expect(*clusterObj.Status.Policy.PriorityClass).To(Equal(priorityClassVCP.Name))
g.Expect(clusterObj.Spec.PriorityClass).To(Equal(priorityClass.Name))
}).
WithTimeout(time.Minute * 3).
WithPolling(time.Second * 5).
Should(Succeed())
})
})

Context("and the cluster has validation errors", func() {
@@ -79,6 +79,7 @@ var _ = When("a shared mode cluster is created", Ordered, Label(e2eTestLabel), f
}

var err error

virtualService, err = virtualCluster.Client.CoreV1().Services("default").Create(ctx, virtualService, metav1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
})
@@ -20,7 +20,9 @@ import (

var _ = When("a shared mode cluster update its envs", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster

ctx := context.Background()

BeforeEach(func() {
namespace := NewNamespace()

@@ -174,7 +176,9 @@ var _ = When("a shared mode cluster update its envs", Label(e2eTestLabel), Label

var _ = When("a shared mode cluster update its server args", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster

ctx := context.Background()

BeforeEach(func() {
namespace := NewNamespace()

@@ -238,7 +242,9 @@ var _ = When("a shared mode cluster update its server args", Label(e2eTestLabel)

var _ = When("a virtual mode cluster update its envs", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster

ctx := context.Background()

BeforeEach(func() {
namespace := NewNamespace()

@@ -389,7 +395,9 @@ var _ = When("a virtual mode cluster update its envs", Label(e2eTestLabel), Labe

var _ = When("a virtual mode cluster update its server args", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
var virtualCluster *VirtualCluster

ctx := context.Background()

BeforeEach(func() {
namespace := NewNamespace()

@@ -502,6 +510,7 @@ var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), La

It("will update server version when version spec is updated", func() {
var cluster v1beta1.Cluster

ctx := context.Background()

err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -531,6 +540,7 @@ var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), La

nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())

_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -594,6 +604,7 @@ var _ = When("a virtual mode cluster update its version", Label(e2eTestLabel), L

It("will update server version when version spec is updated", func() {
var cluster v1beta1.Cluster

ctx := context.Background()

err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -649,6 +660,7 @@ var _ = When("a shared mode cluster scales up servers", Label(e2eTestLabel), Lab
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)

BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
@@ -691,6 +703,7 @@ var _ = When("a shared mode cluster scales up servers", Label(e2eTestLabel), Lab
})
It("will scale up server pods", func() {
var cluster v1beta1.Cluster

ctx := context.Background()

err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -719,6 +732,7 @@ var _ = When("a shared mode cluster scales up servers", Label(e2eTestLabel), Lab

nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())

_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -734,6 +748,7 @@ var _ = When("a shared mode cluster scales down servers", Label(e2eTestLabel), L
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)

BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
@@ -780,6 +795,7 @@ var _ = When("a shared mode cluster scales down servers", Label(e2eTestLabel), L
})
It("will scale down server pods", func() {
var cluster v1beta1.Cluster

ctx := context.Background()

err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -806,6 +822,7 @@ var _ = When("a shared mode cluster scales down servers", Label(e2eTestLabel), L

nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())

_, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -821,6 +838,7 @@ var _ = When("a virtual mode cluster scales up servers", Label(e2eTestLabel), La
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)

BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
@@ -863,6 +881,7 @@ var _ = When("a virtual mode cluster scales up servers", Label(e2eTestLabel), La
})
It("will scale up server pods", func() {
var cluster v1beta1.Cluster

ctx := context.Background()

err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -891,6 +910,7 @@ var _ = When("a virtual mode cluster scales up servers", Label(e2eTestLabel), La

nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())

_, cond := pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
@@ -906,6 +926,7 @@ var _ = When("a virtual mode cluster scales down servers", Label(e2eTestLabel),
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)

BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
@@ -955,6 +976,7 @@ var _ = When("a virtual mode cluster scales down servers", Label(e2eTestLabel),
By("Scaling down cluster")

var cluster v1beta1.Cluster

ctx := context.Background()

err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
@@ -412,13 +412,15 @@ func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
By("Deleting server pod")

// check that the server pods restarted
Eventually(func() any {
Eventually(func(g Gomega) {
serverPods := listServerPods(ctx, virtualCluster)

Expect(len(serverPods)).To(Equal(1))

return serverPods[0].DeletionTimestamp
}).WithTimeout(60 * time.Second).WithPolling(time.Second * 5).Should(BeNil())
g.Expect(serverPods).To(HaveLen(1))
g.Expect(serverPods[0].DeletionTimestamp).To(Not(BeNil()))
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(Succeed())
}

func listServerPods(ctx context.Context, virtualCluster *VirtualCluster) []v1.Pod {
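The refactor swaps a value-returning poll for Gomega's function-with-Gomega form. Assertions made through the injected g fail only that attempt and are retried until the timeout, whereas a bare Expect inside the polled function (as the old version used) fails the whole spec on the first bad poll. Minimal shape of the new pattern:

Eventually(func(g Gomega) {
	serverPods := listServerPods(ctx, virtualCluster)

	// both assertions are retried as a unit on every poll
	g.Expect(serverPods).To(HaveLen(1))
	g.Expect(serverPods[0].DeletionTimestamp).To(Not(BeNil()))
}).
	WithTimeout(time.Minute * 2).
	WithPolling(time.Second * 5).
	Should(Succeed())
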
@@ -37,6 +37,7 @@ import (
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"

"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"

@@ -77,7 +78,6 @@ var (
k8s              *kubernetes.Clientset
k8sClient        client.Client
kubeconfigPath   string
repo             string
helmActionConfig *action.Configuration
)

@@ -86,17 +86,17 @@ var _ = BeforeSuite(func() {
GinkgoWriter.Println("GOCOVERDIR:", os.Getenv("GOCOVERDIR"))

repo = os.Getenv("REPO")
if repo == "" {
	repo = "rancher"
}

_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")

if dockerInstallEnabled {
	installK3SDocker(ctx)
	repo := os.Getenv("REPO")
	if repo == "" {
		repo = "rancher"
	}

	installK3SDocker(ctx, repo+"/k3k", repo+"/k3k-kubelet")
	initKubernetesClient(ctx)
	installK3kChart()
	installK3kChart(repo+"/k3k", repo+"/k3k-kubelet")
} else {
	initKubernetesClient(ctx)
}

@@ -110,6 +110,11 @@ func initKubernetesClient(ctx context.Context) {
	kubeconfig []byte
)

logger, err := zap.NewDevelopment()
Expect(err).NotTo(HaveOccurred())

log.SetLogger(zapr.NewLogger(logger))

kubeconfigPath := os.Getenv("KUBECONFIG")
Expect(kubeconfigPath).To(Not(BeEmpty()))

@@ -128,21 +133,12 @@ func initKubernetesClient(ctx context.Context) {
scheme := buildScheme()
k8sClient, err = client.New(restcfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())

logger, err := zap.NewDevelopment()
Expect(err).NotTo(HaveOccurred())

log.SetLogger(zapr.NewLogger(logger))
}

func buildScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()

	err := v1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
	err = appsv1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
	err = networkingv1.AddToScheme(scheme)
	err := clientgoscheme.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
	err = v1beta1.AddToScheme(scheme)
	Expect(err).NotTo(HaveOccurred())
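A hedged sketch of what the buildScheme change relies on: clientgoscheme.AddToScheme registers all built-in client-go API types (core/v1, apps/v1, networking/v1, scheduling/v1, and so on) in a single call, which is why the individual v1, appsv1, and networkingv1 registrations can be dropped while the custom k3k group still needs its own AddToScheme:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

// newScheme registers every built-in Kubernetes type plus the k3k CRD types.
func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()

	// One call covers all types shipped with client-go.
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		return nil, err
	}

	// The custom k3k API group still needs explicit registration.
	if err := v1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}

	return scheme, nil
}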
@@ -150,7 +146,7 @@ func buildScheme() *runtime.Scheme {
	return scheme
}

func installK3SDocker(ctx context.Context) {
func installK3SDocker(ctx context.Context, controllerImage, kubeletImage string) {
	var (
		err        error
		kubeconfig []byte

@@ -182,16 +178,15 @@ func installK3SDocker(ctx context.Context) {
Expect(tmpFile.Close()).To(Succeed())
kubeconfigPath = tmpFile.Name()

err = k3sContainer.LoadImages(ctx, repo+"/k3k:dev", repo+"/k3k-kubelet:dev")
err = k3sContainer.LoadImages(ctx, controllerImage+":dev", kubeletImage+":dev")
Expect(err).To(Not(HaveOccurred()))
DeferCleanup(os.Remove, kubeconfigPath)

Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
GinkgoWriter.Print(kubeconfigPath)
GinkgoWriter.Print(string(kubeconfig))
GinkgoWriter.Printf("KUBECONFIG set to: %s\n", kubeconfigPath)
}

func installK3kChart() {
func installK3kChart(controllerImage, kubeletImage string) {
	pwd, err := os.Getwd()
	Expect(err).To(Not(HaveOccurred()))

@@ -207,7 +202,7 @@ func installK3kChart() {
Expect(err).To(Not(HaveOccurred()))

err = helmActionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
	GinkgoWriter.Printf("helm debug: "+format+"\n", v...)
	GinkgoWriter.Printf("[Helm] "+format+"\n", v...)
})
Expect(err).To(Not(HaveOccurred()))

@@ -219,9 +214,17 @@ func installK3kChart() {
iCli.Wait = true

controllerMap, _ := k3kChart.Values["controller"].(map[string]any)

extraEnvArray, _ := controllerMap["extraEnv"].([]map[string]any)
extraEnvArray = append(extraEnvArray, map[string]any{
	"name":  "DEBUG",
	"value": "true",
})
controllerMap["extraEnv"] = extraEnvArray

imageMap, _ := controllerMap["image"].(map[string]any)
maps.Copy(imageMap, map[string]any{
	"repository": repo + "/k3k",
	"repository": controllerImage,
	"tag":        "dev",
	"pullPolicy": "IfNotPresent",
})

@@ -230,14 +233,14 @@ func installK3kChart() {
sharedAgentMap, _ := agentMap["shared"].(map[string]any)
sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
maps.Copy(sharedAgentImageMap, map[string]any{
	"repository": repo + "/k3k-kubelet",
	"repository": kubeletImage,
	"tag":        "dev",
})

release, err := iCli.Run(k3kChart, k3kChart.Values)
Expect(err).To(Not(HaveOccurred()))

GinkgoWriter.Printf("Release %s installed in %s namespace\n", release.Name, release.Namespace)
GinkgoWriter.Printf("Helm release '%s' installed in '%s' namespace\n", release.Name, release.Namespace)
}

func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
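The installK3kChart hunks above mutate the chart's in-memory values before running the Helm install action. A minimal sketch of that technique under the assumption that values.yaml nests image settings under controller.image (overrideControllerImage is an illustrative helper, not part of the suite):

package example

import (
	"fmt"
	"maps"

	"helm.sh/helm/v3/pkg/chart/loader"
)

// overrideControllerImage loads a local chart and pins the controller image,
// mirroring how the e2e suite injects ":dev" images before installing.
func overrideControllerImage(chartPath, repository, tag string) error {
	k3kChart, err := loader.Load(chartPath)
	if err != nil {
		return err
	}

	// Values decode from values.yaml as nested map[string]any.
	controllerMap, ok := k3kChart.Values["controller"].(map[string]any)
	if !ok {
		return fmt.Errorf("chart has no controller values block")
	}

	imageMap, ok := controllerMap["image"].(map[string]any)
	if !ok {
		imageMap = map[string]any{}
		controllerMap["image"] = imageMap
	}

	maps.Copy(imageMap, map[string]any{
		"repository": repository,
		"tag":        tag,
		"pullPolicy": "IfNotPresent",
	})

	return nil
}

Passing the mutated k3kChart.Values back into install.Run, as the suite does, makes the overrides take effect without a separate --set layer.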
@@ -300,36 +303,28 @@ func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
_, err = clientset.AppsV1().Deployments(k3kNamespace).Update(ctx, k3kDeployment, metav1.UpdateOptions{})
Expect(err).To(Not(HaveOccurred()))

Eventually(func() bool {
Eventually(func(g Gomega) {
	GinkgoWriter.Println("Checking K3k deployment status")

	dep, err := clientset.AppsV1().Deployments(k3kNamespace).Get(ctx, k3kDeployment.Name, metav1.GetOptions{})
	Expect(err).To(Not(HaveOccurred()))
	g.Expect(err).To(Not(HaveOccurred()))
	g.Expect(dep.Generation).To(Equal(dep.Status.ObservedGeneration))

	// 1. Check if the controller has observed the latest generation
	if dep.Generation > dep.Status.ObservedGeneration {
		GinkgoWriter.Printf("K3k deployment generation: %d, observed generation: %d\n", dep.Generation, dep.Status.ObservedGeneration)
		return false
	var availableCond appsv1.DeploymentCondition

	for _, cond := range dep.Status.Conditions {
		if cond.Type == appsv1.DeploymentAvailable {
			availableCond = cond
			break
		}
	}

	// 2. Check if all replicas have been updated
	if dep.Spec.Replicas != nil && dep.Status.UpdatedReplicas < *dep.Spec.Replicas {
		GinkgoWriter.Printf("K3k deployment replicas: %d, updated replicas: %d\n", *dep.Spec.Replicas, dep.Status.UpdatedReplicas)
		return false
	}

	// 3. Check if all updated replicas are available
	if dep.Status.AvailableReplicas < dep.Status.UpdatedReplicas {
		GinkgoWriter.Printf("K3k deployment available replicas: %d, updated replicas: %d\n", dep.Status.AvailableReplicas, dep.Status.UpdatedReplicas)
		return false
	}

	return true
	g.Expect(availableCond.Type).To(Equal(appsv1.DeploymentAvailable))
	g.Expect(availableCond.Status).To(Equal(v1.ConditionTrue))
}).
	MustPassRepeatedly(5).
	WithPolling(time.Second).
	WithTimeout(time.Second * 30).
	Should(BeTrue())
	Should(Succeed())
}

var _ = AfterSuite(func() {
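MustPassRepeatedly(5) in the hunk above is the interesting knob: the assertion block has to succeed on five consecutive polls before Eventually reports success, which filters out a Deployment that briefly reports Available while still rolling. A small hedged sketch of the knob in isolation:

package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestMustPassRepeatedly(t *testing.T) {
	g := NewWithT(t)

	var ticks atomic.Int32

	// The closure must pass 5 times in a row; any failure resets the streak.
	g.Eventually(func(g Gomega) {
		g.Expect(ticks.Add(1)).To(BeNumerically(">", 3))
	}).
		MustPassRepeatedly(5).
		WithPolling(10 * time.Millisecond).
		WithTimeout(time.Second).
		Should(Succeed())
}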
@@ -342,6 +337,7 @@ var _ = AfterSuite(func() {
}

dumpK3kCoverageData(ctx, goCoverDir)

if k3sContainer != nil {
	// dump k3s logs
	k3sLogs, err := k3sContainer.Logs(ctx)

@@ -40,6 +40,7 @@ var (

var _ = BeforeSuite(func() {
	By("bootstrapping test environment")

	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "..", "charts", "k3k", "templates", "crds")},
		ErrorIfCRDPathMissing: true,

@@ -81,6 +82,7 @@ var _ = BeforeSuite(func() {

go func() {
	defer GinkgoRecover()

	err = mgr.Start(ctx)
	Expect(err).NotTo(HaveOccurred(), "failed to run manager")
}()

@@ -90,6 +92,7 @@ var _ = AfterSuite(func() {
cancel()

By("tearing down the test environment")

err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
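These suite hooks follow the usual controller-runtime envtest lifecycle: point envtest.Environment at the chart's CRD manifests, start it in BeforeSuite, and stop it in AfterSuite. A minimal hedged skeleton of that wiring (the relative CRD path is illustrative):

package suite_test

import (
	"path/filepath"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

var (
	testEnv *envtest.Environment
	cfg     *rest.Config
)

func TestSuite(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Example Suite")
}

var _ = BeforeSuite(func() {
	// Boot a local kube-apiserver/etcd pair with the chart's CRDs preloaded.
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "charts", "k3k", "templates", "crds")},
		ErrorIfCRDPathMissing: true,
	}

	var err error
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
})

var _ = AfterSuite(func() {
	Expect(testEnv.Stop()).To(Succeed())
})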
@@ -35,6 +35,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
createdNS := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"}}
err := k8sClient.Create(context.Background(), createdNS)
Expect(err).To(Not(HaveOccurred()))

namespace = createdNS.Name
})

@@ -79,6 +80,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
Eventually(func() string {
	err := k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
	Expect(err).To(Not(HaveOccurred()))

	return cluster.Status.HostVersion
}).
	WithTimeout(time.Second * 30).

@@ -130,6 +132,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

err := k8sClient.Get(ctx, serviceKey, &service)
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))

return service.Spec.Type
}).
	WithTimeout(time.Second * 30).

@@ -165,6 +168,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

err := k8sClient.Get(ctx, serviceKey, &service)
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))

return service.Spec.Type
}).
	WithTimeout(time.Second * 30).

@@ -213,6 +217,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

err := k8sClient.Get(ctx, serviceKey, &service)
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))

return service.Spec.Type
}).
	WithTimeout(time.Second * 30).

@@ -329,6 +334,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

// Wait for the statefulset to be created and verify volumes/mounts
var statefulSet appsv1.StatefulSet

statefulSetName := k3kcontroller.SafeConcatNameWithPrefix(cluster.Name, "server")

Eventually(func() error {

@@ -343,6 +349,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

// Verify the addon volume exists
var addonVolume *corev1.Volume

for i := range statefulSet.Spec.Template.Spec.Volumes {
	v := &statefulSet.Spec.Template.Spec.Volumes[i]
	if v.Name == "addon-test-addon" {

@@ -350,6 +357,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
		break
	}
}

Expect(addonVolume).NotTo(BeNil(), "addon volume should exist")
Expect(addonVolume.VolumeSource.Secret).NotTo(BeNil())
Expect(addonVolume.VolumeSource.Secret.SecretName).To(Equal("test-addon"))

@@ -359,6 +367,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
Expect(containers).NotTo(BeEmpty())

var addonMount *corev1.VolumeMount

for i := range containers[0].VolumeMounts {
	m := &containers[0].VolumeMounts[i]
	if m.Name == "addon-test-addon" {

@@ -366,6 +375,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
		break
	}
}

Expect(addonMount).NotTo(BeNil(), "addon volume mount should exist")
Expect(addonMount.MountPath).To(Equal("/var/lib/rancher/k3s/server/manifests/test-addon"))
Expect(addonMount.ReadOnly).To(BeTrue())

@@ -391,6 +401,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
		"manifest.yaml": []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cm-two\n"),
	},
}

Expect(k8sClient.Create(ctx, addonSecret1)).To(Succeed())
Expect(k8sClient.Create(ctx, addonSecret2)).To(Succeed())

@@ -411,6 +422,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

// Wait for the statefulset to be created
var statefulSet appsv1.StatefulSet

statefulSetName := k3kcontroller.SafeConcatNameWithPrefix(cluster.Name, "server")

Eventually(func() error {

@@ -428,11 +440,13 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

// Extract only addon volumes (those starting with "addon-")
var addonVolumes []corev1.Volume

for _, v := range volumes {
	if strings.HasPrefix(v.Name, "addon-") {
		addonVolumes = append(addonVolumes, v)
	}
}

Expect(addonVolumes).To(HaveLen(2))
Expect(addonVolumes[0].Name).To(Equal("addon-addon-one"))
Expect(addonVolumes[1].Name).To(Equal("addon-addon-two"))

@@ -443,11 +457,13 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu

// Extract only addon mounts (those starting with "addon-")
var addonMounts []corev1.VolumeMount

for _, m := range containers[0].VolumeMounts {
	if strings.HasPrefix(m.Name, "addon-") {
		addonMounts = append(addonMounts, m)
	}
}

Expect(addonMounts).To(HaveLen(2))
Expect(addonMounts[0].Name).To(Equal("addon-addon-one"))
Expect(addonMounts[0].MountPath).To(Equal("/var/lib/rancher/k3s/server/manifests/addon-one"))

@@ -76,6 +76,7 @@ var ConfigMapTests = func() {
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))

var hostConfigMap v1.ConfigMap

hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)

Eventually(func() error {

@@ -113,6 +114,7 @@ var ConfigMapTests = func() {
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))

var hostConfigMap v1.ConfigMap

hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)

Eventually(func() error {

@@ -146,6 +148,7 @@ var ConfigMapTests = func() {
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
Expect(err).NotTo(HaveOccurred())

return hostConfigMap.Labels
}).
	WithPolling(time.Millisecond * 300).

@@ -172,6 +175,7 @@ var ConfigMapTests = func() {
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))

var hostConfigMap v1.ConfigMap

hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)

Eventually(func() error {

@@ -192,6 +196,7 @@ var ConfigMapTests = func() {
Eventually(func() bool {
	key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
	err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)

	return apierrors.IsNotFound(err)
}).
	WithPolling(time.Millisecond * 300).

@@ -221,12 +226,14 @@ var ConfigMapTests = func() {
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))

var hostConfigMap v1.ConfigMap

hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)

Eventually(func() bool {
	key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
	err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
	GinkgoWriter.Printf("error: %v", err)

	return apierrors.IsNotFound(err)
}).
	WithPolling(time.Millisecond * 300).
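The deletion cases above all share one shape: poll the host cluster until the synced object's Get returns NotFound. A hedged generic helper showing the same pattern with controller-runtime's client (expectDeletedFromHost is illustrative, not part of the suite):

package example

import (
	"context"
	"time"

	. "github.com/onsi/gomega"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// expectDeletedFromHost polls until the synced host-side object is gone.
// Only a NotFound error counts as success; transient errors keep it retrying.
func expectDeletedFromHost(ctx context.Context, c client.Client, key client.ObjectKey, obj client.Object) {
	Eventually(func() bool {
		err := c.Get(ctx, key, obj)

		return apierrors.IsNotFound(err)
	}).
		WithPolling(time.Millisecond * 300).
		WithTimeout(time.Second * 30).
		Should(BeTrue())
}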
@@ -107,6 +107,7 @@ var IngressTests = func() {
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))

var hostIngress networkingv1.Ingress

hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)

Eventually(func() error {

@@ -172,6 +173,7 @@ var IngressTests = func() {
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))

var hostIngress networkingv1.Ingress

hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)

Eventually(func() error {

@@ -205,6 +207,7 @@ var IngressTests = func() {
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
Expect(err).NotTo(HaveOccurred())

return hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name
}).
	WithPolling(time.Millisecond * 300).

@@ -256,6 +259,7 @@ var IngressTests = func() {
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))

var hostIngress networkingv1.Ingress

hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)

Eventually(func() error {

@@ -280,6 +284,7 @@ var IngressTests = func() {
Eventually(func() bool {
	key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
	err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)

	return apierrors.IsNotFound(err)
}).
	WithPolling(time.Millisecond * 300).

@@ -335,11 +340,13 @@ var IngressTests = func() {
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))

var hostIngress networkingv1.Ingress

hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)

Eventually(func() bool {
	key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
	err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)

	return apierrors.IsNotFound(err)
}).
	WithPolling(time.Millisecond * 300).

@@ -85,6 +85,7 @@ var PVCTests = func() {
By(fmt.Sprintf("Created PVC %s in virtual cluster", pvc.Name))

var hostPVC v1.PersistentVolumeClaim

hostPVCName := translateName(cluster, pvc.Namespace, pvc.Name)

Eventually(func() error {

@@ -102,6 +103,7 @@ var PVCTests = func() {
GinkgoWriter.Printf("labels: %v\n", hostPVC.Labels)

var virtualPV v1.PersistentVolume

key := client.ObjectKey{Name: pvc.Name}

err = virtTestEnv.k8sClient.Get(ctx, key, &virtualPV)

@@ -81,6 +81,7 @@ var PriorityClassTests = func() {
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))

var hostPriorityClass schedulingv1.PriorityClass

hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)

Eventually(func() error {

@@ -113,6 +114,7 @@ var PriorityClassTests = func() {
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))

var hostPriorityClass schedulingv1.PriorityClass

hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)

Eventually(func() error {

@@ -144,6 +146,7 @@ var PriorityClassTests = func() {
key := client.ObjectKey{Name: hostPriorityClassName}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
Expect(err).NotTo(HaveOccurred())

return hostPriorityClass.Labels
}).
	WithPolling(time.Millisecond * 300).

@@ -165,6 +168,7 @@ var PriorityClassTests = func() {
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))

var hostPriorityClass schedulingv1.PriorityClass

hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)

Eventually(func() error {

@@ -185,6 +189,7 @@ var PriorityClassTests = func() {
Eventually(func() bool {
	key := client.ObjectKey{Name: hostPriorityClassName}
	err := hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)

	return apierrors.IsNotFound(err)
}).
	WithPolling(time.Millisecond * 300).

@@ -207,6 +212,7 @@ var PriorityClassTests = func() {
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))

var hostPriorityClass schedulingv1.PriorityClass

hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)

Eventually(func() error {

@@ -242,11 +248,13 @@ var PriorityClassTests = func() {
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))

var hostPriorityClass schedulingv1.PriorityClass

hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)

Eventually(func() bool {
	key := client.ObjectKey{Name: hostPriorityClassName}
	err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)

	return apierrors.IsNotFound(err)
}).
	WithPolling(time.Millisecond * 300).

@@ -76,6 +76,7 @@ var SecretTests = func() {
By(fmt.Sprintf("Created Secret %s in virtual cluster", secret.Name))

var hostSecret v1.Secret

hostSecretName := translateName(cluster, secret.Namespace, secret.Name)

Eventually(func() error {

@@ -113,6 +114,7 @@ var SecretTests = func() {
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))

var hostSecret v1.Secret

hostSecretName := translateName(cluster, secret.Namespace, secret.Name)

Eventually(func() error {

@@ -144,6 +146,7 @@ var SecretTests = func() {
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
Expect(err).NotTo(HaveOccurred())

return hostSecret.Labels
}).
	WithPolling(time.Millisecond * 300).

@@ -170,6 +173,7 @@ var SecretTests = func() {
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))

var hostSecret v1.Secret

hostSecretName := translateName(cluster, secret.Namespace, secret.Name)

Eventually(func() error {

@@ -190,6 +194,7 @@ var SecretTests = func() {
Eventually(func() bool {
	key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
	err := hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)

	return apierrors.IsNotFound(err)
}).
	WithPolling(time.Millisecond * 300).

@@ -219,11 +224,13 @@ var SecretTests = func() {
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))

var hostSecret v1.Secret

hostSecretName := translateName(cluster, secret.Namespace, secret.Name)

Eventually(func() bool {
	key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
	err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)

	return apierrors.IsNotFound(err)
}).
	WithPolling(time.Millisecond * 300).

@@ -84,6 +84,7 @@ var ServiceTests = func() {
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))

var hostService v1.Service

hostServiceName := translateName(cluster, service.Namespace, service.Name)

Eventually(func() error {

@@ -132,6 +133,7 @@ var ServiceTests = func() {
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))

var hostService v1.Service

hostServiceName := translateName(cluster, service.Namespace, service.Name)

Eventually(func() error {

@@ -163,6 +165,7 @@ var ServiceTests = func() {
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
Expect(err).NotTo(HaveOccurred())

return hostService.Spec.Ports[0].Name
}).
	WithPolling(time.Millisecond * 300).

@@ -196,6 +199,7 @@ var ServiceTests = func() {
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))

var hostService v1.Service

hostServiceName := translateName(cluster, service.Namespace, service.Name)

Eventually(func() error {

@@ -218,6 +222,7 @@ var ServiceTests = func() {
Eventually(func() bool {
	key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
	err := hostTestEnv.k8sClient.Get(ctx, key, &hostService)

	return apierrors.IsNotFound(err)
}).
	WithPolling(time.Millisecond * 300).

@@ -255,11 +260,13 @@ var ServiceTests = func() {
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))

var hostService v1.Service

hostServiceName := translateName(cluster, service.Namespace, service.Name)

Eventually(func() bool {
	key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
	err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)

	return apierrors.IsNotFound(err)
}).
	WithPolling(time.Millisecond * 300).

@@ -133,6 +133,7 @@ var _ = Describe("Kubelet Controller", func() {

BeforeEach(func() {
	var err error

	ctx, cancel = context.WithCancel(context.Background())

	hostManager, err = ctrl.NewManager(hostTestEnv.Config, ctrl.Options{

@@ -151,12 +152,14 @@ var _ = Describe("Kubelet Controller", func() {

go func() {
	defer GinkgoRecover()

	err := hostManager.Start(ctx)
	Expect(err).NotTo(HaveOccurred(), "failed to run host manager")
}()

go func() {
	defer GinkgoRecover()

	err := virtManager.Start(ctx)
	Expect(err).NotTo(HaveOccurred(), "failed to run virt manager")
}()

@@ -37,6 +37,7 @@ var (

var _ = BeforeSuite(func() {
	By("bootstrapping test environment")

	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "..", "charts", "k3k", "templates", "crds")},
		ErrorIfCRDPathMissing: true,

@@ -59,6 +60,7 @@ var _ = BeforeSuite(func() {

go func() {
	defer GinkgoRecover()

	err = mgr.Start(ctx)
	Expect(err).NotTo(HaveOccurred(), "failed to run manager")
}()

@@ -68,6 +70,7 @@ var _ = AfterSuite(func() {
cancel()

By("tearing down the test environment")

err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})

@@ -2,7 +2,6 @@ package policy_test

import (
	"context"
	"reflect"
	"time"

	"k8s.io/apimachinery/pkg/api/resource"

@@ -78,6 +77,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
	Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
	Namespace: namespace.Name,
}

return k8sClient.Get(ctx, key, networkPolicy)
}).
	WithTimeout(time.Minute).

@@ -133,6 +133,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
	Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
	Namespace: namespace.Name,
}

return k8sClient.Get(context.Background(), key, networkPolicy)
}).
	WithTimeout(time.Minute).

@@ -155,6 +156,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
	Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
	Namespace: namespace.Name,
}

return k8sClient.Get(ctx, key, networkPolicy)
}).
	WithTimeout(time.Second * 10).

@@ -183,6 +185,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() string {
	err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
	Expect(err).To(Not(HaveOccurred()))

	return ns.Labels["pod-security.kubernetes.io/enforce"]
}).
	WithTimeout(time.Second * 10).

@@ -208,6 +211,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() string {
	err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
	Expect(err).To(Not(HaveOccurred()))

	return ns.Labels["pod-security.kubernetes.io/enforce"]
}).
	WithTimeout(time.Second * 10).

@@ -229,6 +233,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() string {
	err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
	Expect(err).To(Not(HaveOccurred()))

	return ns.Labels["pod-security.kubernetes.io/enforce"]
}).
	WithTimeout(time.Second * 10).

@@ -250,7 +255,9 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() bool {
	err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
	Expect(err).To(Not(HaveOccurred()))

	_, found := ns.Labels["pod-security.kubernetes.io/enforce"]

	return found
}).
	WithTimeout(time.Second * 10).

@@ -278,7 +285,9 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() bool {
	err := k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
	Expect(err).To(Not(HaveOccurred()))

	enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]

	return enforceValue == "privileged"
}).
	WithTimeout(time.Second * 10).

@@ -296,7 +305,9 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Eventually(func() bool {
	err = k8sClient.Get(ctx, types.NamespacedName{Name: namespace.Name}, &ns)
	Expect(err).To(Not(HaveOccurred()))

	enforceValue := ns.Labels["pod-security.kubernetes.io/enforce"]

	return enforceValue == "privileged"
}).
	WithTimeout(time.Second * 10).

@@ -307,7 +318,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Expect(ns.Labels).Should(HaveKeyWithValue("pod-security.kubernetes.io/enforce-version", "latest"))
})

It("should update Cluster's PriorityClass", func() {
It("updates the Cluster's policy status with the DefaultPriorityClass", func() {
	policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
		DefaultPriorityClass: "foobar",
	})

@@ -329,19 +340,22 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))

// wait a bit
Eventually(func() bool {
Eventually(func(g Gomega) {
	key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
	err = k8sClient.Get(ctx, key, cluster)
	Expect(err).To(Not(HaveOccurred()))
	return cluster.Spec.PriorityClass == policy.Spec.DefaultPriorityClass
	g.Expect(err).To(Not(HaveOccurred()))

	g.Expect(cluster.Spec.PriorityClass).To(BeEmpty())
	g.Expect(cluster.Status.Policy).To(Not(BeNil()))
	g.Expect(cluster.Status.Policy.PriorityClass).To(Not(BeNil()))
	g.Expect(*cluster.Status.Policy.PriorityClass).To(Equal(policy.Spec.DefaultPriorityClass))
}).
	WithTimeout(time.Second * 10).
	WithPolling(time.Second).
	Should(BeTrue())
	Should(Succeed())
})

It("should update Cluster's NodeSelector", func() {
It("updates the Cluster's policy status with the DefaultNodeSelector", func() {
	policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
		DefaultNodeSelector: map[string]string{"label-1": "value-1"},
	})

@@ -366,18 +380,21 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
Expect(err).To(Not(HaveOccurred()))

// wait a bit
Eventually(func() bool {
Eventually(func(g Gomega) {
	key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
	err = k8sClient.Get(ctx, key, cluster)
	Expect(err).To(Not(HaveOccurred()))
	return reflect.DeepEqual(cluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)

	g.Expect(cluster.Spec.NodeSelector).To(BeEmpty())
	g.Expect(cluster.Status.Policy).To(Not(BeNil()))
	g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
}).
	WithTimeout(time.Second * 10).
	WithPolling(time.Second).
	Should(BeTrue())
	Should(Succeed())
})

It("should update the nodeSelector if changed", func() {
It("updates the Cluster's policy status when the VCP nodeSelector changes", func() {
	policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
		DefaultNodeSelector: map[string]string{"label-1": "value-1"},
	})

@@ -399,43 +416,57 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))

Expect(cluster.Spec.NodeSelector).To(Equal(policy.Spec.DefaultNodeSelector))
// Cluster Spec should not change, VCP NodeSelector should be present in the Status
Eventually(func(g Gomega) {
	key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
	err = k8sClient.Get(ctx, key, cluster)
	Expect(err).To(Not(HaveOccurred()))

	g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
	g.Expect(cluster.Status.Policy).To(Not(BeNil()))
	g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
}).
	WithTimeout(time.Second * 10).
	WithPolling(time.Second).
	Should(Succeed())

// update the VirtualClusterPolicy
policy.Spec.DefaultNodeSelector["label-2"] = "value-2"
err = k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.NodeSelector).To(Not(Equal(policy.Spec.DefaultNodeSelector)))

// wait a bit
Eventually(func() bool {
Eventually(func(g Gomega) {
	key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
	err = k8sClient.Get(ctx, key, cluster)
	Expect(err).To(Not(HaveOccurred()))
	return reflect.DeepEqual(cluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)

	g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1"}))
	g.Expect(cluster.Status.Policy).To(Not(BeNil()))
	g.Expect(cluster.Status.Policy.NodeSelector).To(Equal(map[string]string{"label-1": "value-1", "label-2": "value-2"}))
}).
	WithTimeout(time.Second * 10).
	WithPolling(time.Second).
	Should(BeTrue())
	Should(Succeed())

// Update the Cluster
err = k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
Expect(err).To(Not(HaveOccurred()))

cluster.Spec.NodeSelector["label-3"] = "value-3"
err = k8sClient.Update(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.NodeSelector).To(Not(Equal(policy.Spec.DefaultNodeSelector)))

// wait a bit and check it's restored
Eventually(func() bool {
var updatedCluster v1beta1.Cluster

Consistently(func(g Gomega) {
	key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
	err = k8sClient.Get(ctx, key, &updatedCluster)
	Expect(err).To(Not(HaveOccurred()))
	return reflect.DeepEqual(updatedCluster.Spec.NodeSelector, policy.Spec.DefaultNodeSelector)
	err = k8sClient.Get(ctx, key, cluster)
	g.Expect(err).To(Not(HaveOccurred()))
	g.Expect(cluster.Spec.NodeSelector).To(Equal(map[string]string{"label-1": "value-1", "label-3": "value-3"}))
}).
	WithTimeout(time.Second * 10).
	WithPolling(time.Second).
	Should(BeTrue())
	Should(Succeed())
})

It("should create a ResourceQuota if Quota is enabled", func() {
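The last block swaps Eventually for Consistently, which inverts the semantics: the assertion must keep passing for the whole window, so it proves the controller does not revert the user's extra nodeSelector key instead of merely reaching the expected value once. A self-contained sketch of the difference, assuming only Gomega:

package example_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestConsistentlyVsEventually(t *testing.T) {
	g := NewWithT(t)

	selector := map[string]string{"label-1": "value-1", "label-3": "value-3"}

	// Eventually succeeds as soon as one poll passes; Consistently fails as
	// soon as one poll fails, and succeeds only if every poll in the window passes.
	g.Consistently(func(g Gomega) {
		g.Expect(selector).To(HaveKeyWithValue("label-3", "value-3"))
	}).
		WithTimeout(time.Millisecond * 500).
		WithPolling(time.Millisecond * 50).
		Should(Succeed())
}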
@@ -451,6 +482,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
bindPolicyToNamespace(namespace, policy)

var resourceQuota v1.ResourceQuota

Eventually(func() error {
	key := types.NamespacedName{
		Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),

@@ -485,6 +517,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
	Name:      k3kcontroller.SafeConcatNameWithPrefix(policy.Name),
	Namespace: namespace.Name,
}

return k8sClient.Get(ctx, key, &resourceQuota)
}).
	WithTimeout(time.Minute).

@@ -494,6 +527,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
// get policy again
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(policy), policy)
Expect(err).To(Not(HaveOccurred()))

policy.Spec.Quota = nil
err = k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))

@@ -505,6 +539,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
	Namespace: namespace.Name,
}
err := k8sClient.Get(ctx, key, &resourceQuota)

return apierrors.IsNotFound(err)
}).
	WithTimeout(time.Second * 10).

@@ -531,6 +566,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
	Name:      k3kcontroller.SafeConcatNameWithPrefix(clusterPolicy.Name),
	Namespace: namespace.Name,
}

return k8sClient.Get(ctx, key, &resourceQuota)
}).
	WithTimeout(time.Minute).

@@ -548,6 +584,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
	Namespace: namespace.Name,
}
err := k8sClient.Get(ctx, key, &resourceQuota)

return apierrors.IsNotFound(err)
}).
	WithTimeout(time.Second * 10).