Mirror of https://github.com/rancher/k3k.git (synced 2026-02-19 20:40:04 +00:00)

Compare commits: chart-0.3. ... chart-0.3. (10 commits)
| SHA1 |
|---|
| 4fe36b3d0c |
| 01589bb359 |
| 30217df268 |
| 04198652d5 |
| 72eb819216 |
| 4d4003f6f9 |
| aca01127f8 |
| 1550c6b45a |
| caf785f23b |
| b3f7a8ab7f |
.github/workflows/test.yaml (vendored, 19 changes)
@@ -79,6 +79,13 @@ jobs:

      - name: Install Ginkgo
        run: go install github.com/onsi/ginkgo/v2/ginkgo

      - name: Set coverage environment
        run: |
          mkdir ${{ github.workspace }}/covdata
          echo "COVERAGE=true" >> $GITHUB_ENV
          echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV

      - name: Build and package
        run: |

@@ -94,7 +101,17 @@ jobs:

      - name: Run e2e tests
        run: make test-e2e

      - name: Upload coverage reports to Codecov
      - name: Convert coverage data
        run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out

      - name: Upload coverage reports to Codecov (controller)
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ${GOCOVERDIR}/cover.out
          flags: controller

      - name: Upload coverage reports to Codecov (e2e)
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
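These steps lean on Go's binary coverage support (Go 1.20+): binaries built with coverage instrumentation write profiles into the directory named by `GOCOVERDIR`, and `go tool covdata textfmt` converts those profiles into a text profile Codecov can ingest. A minimal sketch of that flow, with illustrative step names, the `./bin/app` path, and an explicit `-cover` flag that are not taken from this repository (its real build flags live in the Makefile):

```yaml
# Minimal sketch: collect binary coverage from an instrumented Go binary.
# Step names, the ./bin/app path, and the explicit -cover flag are illustrative.
- name: Build with coverage instrumentation
  run: go build -cover -o ./bin/app .
- name: Run the instrumented binary
  run: |
    mkdir -p "$GOCOVERDIR"      # assumes GOCOVERDIR was exported via $GITHUB_ENV earlier
    ./bin/app --help            # any run writes covmeta/covcounters files into GOCOVERDIR
- name: Convert to a text profile
  run: go tool covdata textfmt -i="$GOCOVERDIR" -o "$GOCOVERDIR/cover.out"
```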
Makefile (8 changes)

@@ -105,10 +105,10 @@ validate: generate docs fmt ## Validate the project checking for any dependency

.PHONY: install
install: ## Install K3k with Helm on the targeted Kubernetes cluster
	helm upgrade --install --namespace k3k-system --create-namespace \
		--set image.repository=$(REPO)/k3k \
		--set image.tag=$(VERSION) \
		--set sharedAgent.image.repository=$(REPO)/k3k-kubelet \
		--set sharedAgent.image.tag=$(VERSION) \
		--set controller.image.repository=$(REPO)/k3k \
		--set controller.image.tag=$(VERSION) \
		--set agent.shared.image.repository=$(REPO)/k3k-kubelet \
		--set agent.shared.image.tag=$(VERSION) \
		k3k ./charts/k3k/

.PHONY: help
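The new `--set` flags map onto the restructured chart values. An equivalent `values.yaml` fragment, with placeholder tags standing in for `$(VERSION)`, could look roughly like this:

```yaml
# Equivalent of the new `make install` flags; tags are placeholders for $(VERSION).
controller:
  image:
    repository: rancher/k3k             # $(REPO)/k3k in the Makefile
    tag: "v0.3.5"
agent:
  shared:
    image:
      repository: rancher/k3k-kubelet   # $(REPO)/k3k-kubelet in the Makefile
      tag: "v0.3.5"
```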
@@ -71,7 +71,7 @@ To install it, simply download the latest available version for your architectur

For example, you can download the Linux amd64 version with:

```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.4/k3kcli-linux-amd64 && \
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.5/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```

@@ -79,7 +79,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.4/k3kcli-l

You should now be able to run:

```bash
-> % k3kcli --version
k3kcli version v0.3.4
k3kcli version v0.3.5
```
@@ -2,5 +2,5 @@ apiVersion: v2

name: k3k
description: A Helm chart for K3K
type: application
version: 0.3.4
appVersion: v0.3.4
version: 0.3.5
appVersion: v0.3.5
@@ -365,6 +365,11 @@ spec:
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: ingress, loadbalancer and nodePort are mutually exclusive;
|
||||
only one can be set
|
||||
rule: '[has(self.ingress), has(self.loadbalancer), has(self.nodePort)].filter(x,
|
||||
x).size() <= 1'
|
||||
mirrorHostNodes:
|
||||
description: |-
|
||||
MirrorHostNodes controls whether node objects from the host cluster
|
||||
@@ -574,6 +579,108 @@ spec:
|
||||
x-kubernetes-validations:
|
||||
- message: serviceCIDR is immutable
|
||||
rule: self == oldSelf
|
||||
sync:
|
||||
default: {}
|
||||
description: Sync specifies the resources types that will be synced
|
||||
from virtual cluster to host cluster.
|
||||
properties:
|
||||
configmaps:
|
||||
default:
|
||||
enabled: true
|
||||
description: ConfigMaps resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
ingresses:
|
||||
default:
|
||||
enabled: false
|
||||
description: Ingresses resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
persistentVolumeClaims:
|
||||
default:
|
||||
enabled: true
|
||||
description: PersistentVolumeClaims resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
priorityClasses:
|
||||
default:
|
||||
enabled: false
|
||||
description: PriorityClasses resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
secrets:
|
||||
default:
|
||||
enabled: true
|
||||
description: Secrets resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
services:
|
||||
default:
|
||||
enabled: true
|
||||
description: Services resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
tlsSANs:
|
||||
description: TLSSANs specifies subject alternative names for the K3s
|
||||
server certificate.
|
||||
|
||||
@@ -225,6 +225,108 @@ spec:
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
type: object
|
||||
sync:
|
||||
default: {}
|
||||
description: Sync specifies the resources types that will be synced
|
||||
from virtual cluster to host cluster.
|
||||
properties:
|
||||
configmaps:
|
||||
default:
|
||||
enabled: true
|
||||
description: ConfigMaps resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
ingresses:
|
||||
default:
|
||||
enabled: false
|
||||
description: Ingresses resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
persistentVolumeClaims:
|
||||
default:
|
||||
enabled: true
|
||||
description: PersistentVolumeClaims resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
priorityClasses:
|
||||
default:
|
||||
enabled: false
|
||||
description: PriorityClasses resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
secrets:
|
||||
default:
|
||||
enabled: true
|
||||
description: Secrets resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
services:
|
||||
default:
|
||||
enabled: true
|
||||
description: Services resources sync configuration.
|
||||
properties:
|
||||
enabled:
|
||||
description: Enabled is an on/off switch for syncing resources.
|
||||
type: boolean
|
||||
selector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Selector specifies set of labels of the resources that will be synced, if empty
|
||||
then all resources of the given type will be synced.
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
status:
|
||||
description: Status reflects the observed state of the VirtualClusterPolicy.
|
||||
|
||||
@@ -60,3 +60,54 @@ Create the name of the service account to use

{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

{{/*
Print the image pull secrets in the expected format (an array of objects with one possible field, "name").
*/}}
{{- define "image.pullSecrets" }}
{{- $imagePullSecrets := list }}
{{- range . }}
{{- if kindIs "string" . }}
{{- $imagePullSecrets = append $imagePullSecrets (dict "name" .) }}
{{- else }}
{{- $imagePullSecrets = append $imagePullSecrets . }}
{{- end }}
{{- end }}
{{- toYaml $imagePullSecrets }}
{{- end }}

{{- define "controller.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.controller.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}

{{- define "server.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.server.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}

{{- define "agent.virtual.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.agent.virtual.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}

{{- define "agent.shared.registry" }}
{{- $registry := .Values.global.imageRegistry | default .Values.agent.shared.image.registry -}}
{{- if $registry }}
{{- $registry }}/
{{- else }}
{{- $registry }}
{{- end }}
{{- end }}
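Each `*.registry` helper emits the registry with a trailing slash only when one is configured, and `global.imageRegistry` takes precedence over the per-component `image.registry`. An illustrative set of values and the image reference they should render to in the controller deployment (hostname and tag are placeholders):

```yaml
# Illustrative inputs for the controller.registry helper; hostname/tag are placeholders.
global:
  imageRegistry: "registry.example.com"   # wins over the per-component registry
controller:
  image:
    registry: ""                          # ignored while the global override is set
    repository: rancher/k3k
    tag: "v0.3.5"
# expected image reference: registry.example.com/rancher/k3k:v0.3.5
```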
@@ -6,7 +6,7 @@ metadata:
|
||||
{{- include "k3k.labels" . | nindent 4 }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
replicas: {{ .Values.image.replicaCount }}
|
||||
replicas: {{ .Values.controller.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "k3k.selectorLabels" . | nindent 6 }}
|
||||
@@ -15,25 +15,51 @@ spec:
|
||||
labels:
|
||||
{{- include "k3k.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
imagePullSecrets: {{- include "image.pullSecrets" (concat .Values.controller.imagePullSecrets .Values.global.imagePullSecrets) | nindent 8 }}
|
||||
containers:
|
||||
- image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
- image: "{{- include "controller.registry" .}}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
|
||||
name: {{ .Chart.Name }}
|
||||
resources:
|
||||
requests:
|
||||
cpu: {{ .Values.controller.resources.requests.cpu }}
|
||||
memory: {{ .Values.controller.resources.requests.memory }}
|
||||
limits:
|
||||
{{ if .Values.controller.resources.limits.cpu }}
|
||||
cpu: {{ .Values.controller.resources.limits.cpu }}
|
||||
{{ end }}
|
||||
{{ if .Values.controller.resources.limits.memory }}
|
||||
memory: {{ .Values.controller.resources.limits.memory }}
|
||||
{{ end}}
|
||||
args:
|
||||
- k3k
|
||||
{{- range $key, $value := include "image.pullSecrets" (concat .Values.agent.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
|
||||
- --agent-image-pull-secret
|
||||
- {{ .name }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := include "image.pullSecrets" (concat .Values.server.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
|
||||
- --server-image-pull-secret
|
||||
- {{ .name }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: CLUSTER_CIDR
|
||||
value: {{ .Values.host.clusterCIDR }}
|
||||
- name: SHARED_AGENT_IMAGE
|
||||
value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}"
|
||||
- name: SHARED_AGENT_PULL_POLICY
|
||||
value: {{ .Values.sharedAgent.image.pullPolicy }}
|
||||
- name: K3S_IMAGE
|
||||
value: {{ .Values.k3sServer.image.repository }}
|
||||
- name: K3S_IMAGE_PULL_POLICY
|
||||
value: {{ .Values.k3sServer.image.pullPolicy }}
|
||||
value: "{{- include "agent.shared.registry" .}}{{ .Values.agent.shared.image.repository }}:{{ default .Chart.AppVersion .Values.agent.shared.image.tag }}"
|
||||
- name: SHARED_AGENT_IMAGE_PULL_POLICY
|
||||
value: {{ .Values.agent.shared.image.pullPolicy }}
|
||||
- name: VIRTUAL_AGENT_IMAGE
|
||||
value: "{{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }}"
|
||||
- name: VIRTUAL_AGENT_IMAGE_PULL_POLICY
|
||||
value: {{ .Values.agent.virtual.image.pullPolicy }}
|
||||
- name: K3S_SERVER_IMAGE
|
||||
value: "{{- include "server.registry" .}}{{ .Values.server.image.repository }}"
|
||||
- name: K3S_SERVER_IMAGE_PULL_POLICY
|
||||
value: {{ .Values.server.image.pullPolicy }}
|
||||
- name: KUBELET_PORT_RANGE
|
||||
value: {{ .Values.sharedAgent.kubeletPortRange }}
|
||||
value: {{ .Values.agent.shared.kubeletPortRange }}
|
||||
- name: WEBHOOK_PORT_RANGE
|
||||
value: {{ .Values.sharedAgent.webhookPortRange }}
|
||||
value: {{ .Values.agent.shared.webhookPortRange }}
|
||||
- name: CONTROLLER_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
|
||||
@@ -1,31 +1,11 @@
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: rancher/k3k
|
||||
tag: ""
|
||||
pullPolicy: ""
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
# extraEnv allows you to specify additional environment variables for the k3k controller deployment.
|
||||
# This is useful for passing custom configuration or secrets to the controller.
|
||||
# For example:
|
||||
# extraEnv:
|
||||
# - name: MY_CUSTOM_VAR
|
||||
# value: "my_custom_value"
|
||||
# - name: ANOTHER_VAR
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# name: my-secret
|
||||
# key: my-key
|
||||
extraEnv: []
|
||||
|
||||
host:
|
||||
# clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
|
||||
# the controller will collect the PodCIDRs of all the nodes on the system.
|
||||
clusterCIDR: ""
|
||||
global:
|
||||
# -- Global override for container image registry
|
||||
imageRegistry: ""
|
||||
# -- Global override for container image registry pull secrets
|
||||
imagePullSecrets: []
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
@@ -34,18 +14,65 @@ serviceAccount:
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
# configuration related to the shared agent mode in k3k
|
||||
sharedAgent:
|
||||
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
|
||||
kubeletPortRange: "50000-51000"
|
||||
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
|
||||
webhookPortRange: "51001-52000"
|
||||
host:
|
||||
# clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set
|
||||
# the controller will collect the PodCIDRs of all the nodes on the system.
|
||||
clusterCIDR: ""
|
||||
|
||||
controller:
|
||||
replicaCount: 1
|
||||
image:
|
||||
repository: "rancher/k3k-kubelet"
|
||||
registry: ""
|
||||
repository: rancher/k3k
|
||||
tag: ""
|
||||
pullPolicy: ""
|
||||
# image registry configuration related to the k3s server
|
||||
k3sServer:
|
||||
imagePullSecrets: []
|
||||
# extraEnv allows you to specify additional environment variables for the k3k controller deployment.
|
||||
# This is useful for passing custom configuration or secrets to the controller.
|
||||
# For example:
|
||||
# extraEnv:
|
||||
# - name: MY_CUSTOM_VAR
|
||||
# value: "my_custom_value"
|
||||
# - name: ANOTHER_VAR
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# name: my-secret
|
||||
# key: my-key
|
||||
extraEnv: []
|
||||
# resources limits and requests allows you to set resources limits and requests for CPU and Memory
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "100Mi"
|
||||
limits:
|
||||
cpu: ""
|
||||
memory: ""
|
||||
|
||||
# configuration related to the agent component in k3k
|
||||
agent:
|
||||
imagePullSecrets: []
|
||||
# configuration related to agent in shared mode
|
||||
shared:
|
||||
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
|
||||
kubeletPortRange: "50000-51000"
|
||||
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
|
||||
webhookPortRange: "51001-52000"
|
||||
image:
|
||||
registry: ""
|
||||
repository: "rancher/k3k-kubelet"
|
||||
tag: ""
|
||||
pullPolicy: ""
|
||||
# configuration related to agent in virtual mode
|
||||
virtual:
|
||||
image:
|
||||
registry: ""
|
||||
repository: "rancher/k3s"
|
||||
pullPolicy: ""
|
||||
|
||||
# configuration related to k3s server component in k3k
|
||||
server:
|
||||
imagePullSecrets: []
|
||||
image:
|
||||
registry:
|
||||
repository: "rancher/k3s"
|
||||
pullPolicy: ""
|
||||
|
||||
@@ -6,7 +6,7 @@ This document provides advanced usage information for k3k, including detailed us

The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crd-docs.md) for the full specs.

**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/cli-docs.md) documentation for more details.
**Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/k3kcli.md) documentation for more details.
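For orientation, a minimal `Cluster` manifest could look like the sketch below. The `k3k.io/v1alpha1` group and version come from the API package referenced throughout this change set; the metadata values and the SAN are placeholders, and every spec field shown is an optional knob described in the CRD docs.

```yaml
# Minimal sketch of a Cluster resource; metadata values and the SAN are placeholders.
apiVersion: k3k.io/v1alpha1
kind: Cluster
metadata:
  name: demo
  namespace: k3k-demo
spec:
  mirrorHostNodes: false
  tlsSANs:
    - k3k-demo.example.com   # extra SAN for the K3s server certificate
```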
@@ -134,10 +134,28 @@ _Appears in:_
|
||||
| `workerLimit` _[ResourceList](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core)_ | WorkerLimit specifies resource limits for agent nodes. | | |
|
||||
| `mirrorHostNodes` _boolean_ | MirrorHostNodes controls whether node objects from the host cluster<br />are mirrored into the virtual cluster. | | |
|
||||
| `customCAs` _[CustomCAs](#customcas)_ | CustomCAs specifies the cert/key pairs for custom CA certificates. | | |
|
||||
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
|
||||
|
||||
|
||||
|
||||
|
||||
#### ConfigMapSyncConfig
|
||||
|
||||
|
||||
|
||||
ConfigMapSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### CredentialSource
|
||||
|
||||
|
||||
@@ -229,6 +247,23 @@ _Appears in:_
|
||||
| `ingressClassName` _string_ | IngressClassName specifies the IngressClass to use for the Ingress. | | |
|
||||
|
||||
|
||||
#### IngressSyncConfig
|
||||
|
||||
|
||||
|
||||
IngressSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### LoadBalancerConfig
|
||||
|
||||
|
||||
@@ -294,6 +329,23 @@ _Appears in:_
|
||||
|
||||
|
||||
|
||||
#### PersistentVolumeClaimSyncConfig
|
||||
|
||||
|
||||
|
||||
PersistentVolumeClaimSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### PodSecurityAdmissionLevel
|
||||
|
||||
_Underlying type:_ _string_
|
||||
@@ -308,6 +360,79 @@ _Appears in:_
|
||||
|
||||
|
||||
|
||||
#### PriorityClassSyncConfig
|
||||
|
||||
|
||||
|
||||
PriorityClassSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### SecretSyncConfig
|
||||
|
||||
|
||||
|
||||
SecretSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### ServiceSyncConfig
|
||||
|
||||
|
||||
|
||||
ServiceSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears in:_
|
||||
- [SyncConfig](#syncconfig)
|
||||
|
||||
| Field | Description | Default | Validation |
|
||||
| --- | --- | --- | --- |
|
||||
| `enabled` _boolean_ | Enabled is an on/off switch for syncing resources. | | |
|
||||
| `selector` _object (keys:string, values:string)_ | Selector specifies set of labels of the resources that will be synced, if empty<br />then all resources of the given type will be synced. | | |
|
||||
|
||||
|
||||
#### SyncConfig

SyncConfig will contain the resources that should be synced from virtual cluster to host cluster.

_Appears in:_
- [ClusterSpec](#clusterspec)
- [VirtualClusterPolicySpec](#virtualclusterpolicyspec)

| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `services` _[ServiceSyncConfig](#servicesyncconfig)_ | Services resources sync configuration. | \{ enabled:true \} | |
| `configmaps` _[ConfigMapSyncConfig](#configmapsyncconfig)_ | ConfigMaps resources sync configuration. | \{ enabled:true \} | |
| `secrets` _[SecretSyncConfig](#secretsyncconfig)_ | Secrets resources sync configuration. | \{ enabled:true \} | |
| `ingresses` _[IngressSyncConfig](#ingresssyncconfig)_ | Ingresses resources sync configuration. | \{ enabled:false \} | |
| `persistentVolumeClaims` _[PersistentVolumeClaimSyncConfig](#persistentvolumeclaimsyncconfig)_ | PersistentVolumeClaims resources sync configuration. | \{ enabled:true \} | |
| `priorityClasses` _[PriorityClassSyncConfig](#priorityclasssyncconfig)_ | PriorityClasses resources sync configuration. | \{ enabled:false \} | |
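Taken together, the defaults above mean Services, ConfigMaps, Secrets, and PersistentVolumeClaims sync out of the box, while Ingresses and PriorityClasses are opt-in. A sketch of the `sync` block as it could appear under a `Cluster` (or `VirtualClusterPolicy`) spec; the label key/value pair is a placeholder:

```yaml
# Sketch of the sync block; the label key/value pair is a placeholder.
sync:
  services:
    enabled: true
  configmaps:
    enabled: true
    selector:
      app.kubernetes.io/part-of: my-app   # only ConfigMaps with this label are synced
  ingresses:
    enabled: false                        # Ingress syncing stays opt-in
```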
#### VirtualClusterPolicy
|
||||
|
||||
|
||||
@@ -366,6 +491,7 @@ _Appears in:_
|
||||
| `allowedMode` _[ClusterMode](#clustermode)_ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". | shared | Enum: [shared virtual] <br /> |
|
||||
| `disableNetworkPolicy` _boolean_ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. | | |
|
||||
| `podSecurityAdmissionLevel` _[PodSecurityAdmissionLevel](#podsecurityadmissionlevel)_ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. | | Enum: [privileged baseline restricted] <br /> |
|
||||
| `sync` _[SyncConfig](#syncconfig)_ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. | \{ \} | |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -32,18 +32,27 @@ Load these images into your internal (air-gapped) registry.

Update the `values.yaml` file in the K3k Helm chart with air gap settings:

```yaml
image:
  repository: rancher/k3k
  tag: "" # Specify the version tag
  pullPolicy: "" # Optional: "IfNotPresent", "Always", etc.

sharedAgent:
controller:
  imagePullSecrets: [] # Optional
  image:
    repository: rancher/k3k-kubelet
    tag: "" # Specify the version tag
    pullPolicy: "" # Optional
    repository: rancher/k3k
    tag: "" # Specify the version tag
    pullPolicy: "" # Optional: "IfNotPresent", "Always", etc.

k3sServer:
agent:
  imagePullSecrets: []
  virtual:
    image:
      repository: rancher/k3s
      pullPolicy: "" # Optional
  shared:
    image:
      repository: rancher/k3k-kubelet
      tag: "" # Specify the version tag
      pullPolicy: "" # Optional

server:
  imagePullSecrets: [] # Optional
  image:
    repository: rancher/k3s
    pullPolicy: "" # Optional
```
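With the registry helpers introduced in this chart version, an air-gapped install can also point every component at the internal registry in one place rather than per component. A sketch, assuming a placeholder registry hostname and pull-secret name:

```yaml
# Sketch: single registry override for an air-gapped install.
# The registry hostname and secret name are placeholders.
global:
  imageRegistry: "registry.internal.example"
  imagePullSecrets:
    - name: internal-registry-creds
```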
@@ -1,198 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
const ConfigMapSyncerName = "configmap-syncer"
|
||||
|
||||
type ConfigMapSyncer struct {
|
||||
mutex sync.RWMutex
|
||||
// VirtualClient is the client for the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// CoreClient is the client for the host cluster
|
||||
HostClient client.Client
|
||||
// TranslateFunc is the function that translates a given resource from it's virtual representation to the host
|
||||
// representation
|
||||
TranslateFunc func(*corev1.ConfigMap) (*corev1.ConfigMap, error)
|
||||
// Logger is the logger that the controller will use
|
||||
Logger *k3klog.Logger
|
||||
// objs are the objects that the syncer should watch/syncronize. Should only be manipulated
|
||||
// through add/remove
|
||||
objs sets.Set[types.NamespacedName]
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) Name() string {
|
||||
return ConfigMapSyncerName
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
if !c.isWatching(req.NamespacedName) {
|
||||
// return immediately without re-enqueueing. We aren't watching this resource
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var virtual corev1.ConfigMap
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to get configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translated, err := c.TranslateFunc(&virtual)
|
||||
if err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to translate configmap %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translatedKey := types.NamespacedName{
|
||||
Namespace: translated.Namespace,
|
||||
Name: translated.Name,
|
||||
}
|
||||
|
||||
var host corev1.ConfigMap
|
||||
if err = c.HostClient.Get(ctx, translatedKey, &host); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = c.HostClient.Create(ctx, translated)
|
||||
// for simplicity's sake, we don't check for conflict errors. The existing object will get
|
||||
// picked up on in the next re-enqueue
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to create host configmap %s/%s for virtual configmap %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host configmap %s/%s: %w", translated.Namespace, translated.Name, err)
|
||||
}
|
||||
// we are going to use the host in order to avoid conflicts on update
|
||||
host.Data = translated.Data
|
||||
if host.Labels == nil {
|
||||
host.Labels = make(map[string]string, len(translated.Labels))
|
||||
}
|
||||
// we don't want to override labels made on the host cluster by other applications
|
||||
// but we do need to make sure the labels that the kubelet uses to track host cluster values
|
||||
// are being tracked appropriately
|
||||
for key, value := range translated.Labels {
|
||||
host.Labels[key] = value
|
||||
}
|
||||
|
||||
if err = c.HostClient.Update(ctx, &host); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to update host configmap %s/%s for virtual configmap %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// isWatching is a utility method to determine if a key is in objs without the caller needing
|
||||
// to handle mutex lock/unlock.
|
||||
func (c *ConfigMapSyncer) isWatching(key types.NamespacedName) bool {
|
||||
c.mutex.RLock()
|
||||
defer c.mutex.RUnlock()
|
||||
|
||||
return c.objs.Has(key)
|
||||
}
|
||||
|
||||
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
|
||||
// same resource.
|
||||
func (c *ConfigMapSyncer) AddResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
|
||||
// if we already sync this object, no need to writelock/add it
|
||||
if c.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// lock in write mode since we are now adding the key
|
||||
c.mutex.Lock()
|
||||
|
||||
if c.objs == nil {
|
||||
c.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
c.objs = c.objs.Insert(objKey)
|
||||
c.mutex.Unlock()
|
||||
|
||||
_, err := c.Reconcile(ctx, reconcile.Request{
|
||||
NamespacedName: objKey,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
|
||||
// removed resource.
|
||||
func (c *ConfigMapSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
// if we don't sync this object, no need to writelock/add it
|
||||
if !c.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
return c.removeHostConfigMap(ctx, namespace, name)
|
||||
}); err != nil {
|
||||
return fmt.Errorf("unable to remove configmap: %w", err)
|
||||
}
|
||||
|
||||
c.mutex.Lock()
|
||||
|
||||
if c.objs == nil {
|
||||
c.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
c.objs = c.objs.Delete(objKey)
|
||||
c.mutex.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) removeHostConfigMap(ctx context.Context, virtualNamespace, virtualName string) error {
|
||||
var vConfigMap corev1.ConfigMap
|
||||
|
||||
key := types.NamespacedName{
|
||||
Namespace: virtualNamespace,
|
||||
Name: virtualName,
|
||||
}
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, key, &vConfigMap); err != nil {
|
||||
return fmt.Errorf("unable to get virtual configmap %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
translated, err := c.TranslateFunc(&vConfigMap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
return c.HostClient.Delete(ctx, translated)
|
||||
}
|
||||
@@ -1,135 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
type ControllerHandler struct {
|
||||
sync.RWMutex
|
||||
// Mgr is the manager used to run new controllers - from the virtual cluster
|
||||
Mgr manager.Manager
|
||||
// Scheme is the scheme used to run new controllers - from the virtual cluster
|
||||
Scheme runtime.Scheme
|
||||
// HostClient is the client used to communicate with the host cluster
|
||||
HostClient client.Client
|
||||
// VirtualClient is the client used to communicate with the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// Translator is the translator that will be used to adjust objects before they
|
||||
// are made on the host cluster
|
||||
Translator translate.ToHostTranslator
|
||||
// Logger is the logger that the controller will use to log errors
|
||||
Logger *k3klog.Logger
|
||||
// controllers are the controllers which are currently running
|
||||
controllers map[schema.GroupVersionKind]updateableReconciler
|
||||
}
|
||||
|
||||
// updateableReconciler is a reconciler that only syncs specific resources (by name/namespace). This list can
|
||||
// be altered through the Add and Remove methods
|
||||
type updateableReconciler interface {
|
||||
reconcile.Reconciler
|
||||
Name() string
|
||||
AddResource(ctx context.Context, namespace string, name string) error
|
||||
RemoveResource(ctx context.Context, namespace string, name string) error
|
||||
}
|
||||
|
||||
func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object) error {
|
||||
c.RLock()
|
||||
|
||||
controllers := c.controllers
|
||||
if controllers != nil {
|
||||
if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok {
|
||||
err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
|
||||
c.RUnlock()
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// we need to manually lock/unlock since we intned on write locking to add a new controller
|
||||
c.RUnlock()
|
||||
|
||||
var r updateableReconciler
|
||||
|
||||
switch obj.(type) {
|
||||
case *v1.Secret:
|
||||
r = &SecretSyncer{
|
||||
HostClient: c.HostClient,
|
||||
VirtualClient: c.VirtualClient,
|
||||
// TODO: Need actual function
|
||||
TranslateFunc: func(s *v1.Secret) (*v1.Secret, error) {
|
||||
// note that this doesn't do any type safety - fix this
|
||||
// when generics work
|
||||
c.Translator.TranslateTo(s)
|
||||
// Remove service-account-token types when synced to the host
|
||||
if s.Type == v1.SecretTypeServiceAccountToken {
|
||||
s.Type = v1.SecretTypeOpaque
|
||||
}
|
||||
return s, nil
|
||||
},
|
||||
Logger: c.Logger,
|
||||
}
|
||||
case *v1.ConfigMap:
|
||||
r = &ConfigMapSyncer{
|
||||
HostClient: c.HostClient,
|
||||
VirtualClient: c.VirtualClient,
|
||||
// TODO: Need actual function
|
||||
TranslateFunc: func(s *v1.ConfigMap) (*v1.ConfigMap, error) {
|
||||
c.Translator.TranslateTo(s)
|
||||
return s, nil
|
||||
},
|
||||
Logger: c.Logger,
|
||||
}
|
||||
default:
|
||||
// TODO: Technically, the configmap/secret syncers are relatively generic, and this
|
||||
// logic could be used for other types.
|
||||
return fmt.Errorf("unrecognized type: %T", obj)
|
||||
}
|
||||
|
||||
err := ctrl.NewControllerManagedBy(c.Mgr).
|
||||
Named(r.Name()).
|
||||
For(&v1.ConfigMap{}).
|
||||
Complete(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to start configmap controller: %w", err)
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
|
||||
if c.controllers == nil {
|
||||
c.controllers = map[schema.GroupVersionKind]updateableReconciler{}
|
||||
}
|
||||
|
||||
c.controllers[obj.GetObjectKind().GroupVersionKind()] = r
|
||||
|
||||
c.Unlock()
|
||||
|
||||
return r.AddResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
}
|
||||
|
||||
func (c *ControllerHandler) RemoveResource(ctx context.Context, obj client.Object) error {
|
||||
// since we aren't adding a new controller, we don't need to lock
|
||||
c.RLock()
|
||||
ctrl, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]
|
||||
c.RUnlock()
|
||||
|
||||
if !ok {
|
||||
return fmt.Errorf("no controller found for gvk %s", obj.GetObjectKind().GroupVersionKind())
|
||||
}
|
||||
|
||||
return ctrl.RemoveResource(ctx, obj.GetNamespace(), obj.GetName())
|
||||
}
|
||||
@@ -1,120 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
pvcController = "pvc-syncer-controller"
|
||||
pvcFinalizerName = "pvc.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type PVCReconciler struct {
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
Translator translate.ToHostTranslator
|
||||
}
|
||||
|
||||
// AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet
|
||||
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
// initialize a new Reconciler
|
||||
reconciler := PVCReconciler{
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
Translator: translator,
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(pvcController).
|
||||
For(&v1.PersistentVolumeClaim{}).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var (
|
||||
virtPVC v1.PersistentVolumeClaim
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedPVC := r.pvc(&virtPVC)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostScheme); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtPVC.DeletionTimestamp.IsZero() {
|
||||
// deleting the synced service if exists
|
||||
if err := r.hostClient.Delete(ctx, syncedPVC); !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// remove the finalizer after cleaning up the synced service
|
||||
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// create the pvc on host
|
||||
log.Info("creating the persistent volume for the first time on the host cluster")
|
||||
|
||||
// note that we dont need to update the PVC on the host cluster, only syncing the PVC to allow being
|
||||
// handled by the host cluster.
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.hostClient.Create(ctx, syncedPVC))
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
|
||||
hostPVC := obj.DeepCopy()
|
||||
r.Translator.TranslateTo(hostPVC)
|
||||
|
||||
return hostPVC
|
||||
}
|
||||
@@ -1,197 +0,0 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller"
|
||||
k3klog "github.com/rancher/k3k/pkg/log"
|
||||
)
|
||||
|
||||
const SecretSyncerName = "secret-syncer"
|
||||
|
||||
type SecretSyncer struct {
|
||||
mutex sync.RWMutex
|
||||
// VirtualClient is the client for the virtual cluster
|
||||
VirtualClient client.Client
|
||||
// CoreClient is the client for the host cluster
|
||||
HostClient client.Client
|
||||
// TranslateFunc is the function that translates a given resource from it's virtual representation to the host
|
||||
// representation
|
||||
TranslateFunc func(*corev1.Secret) (*corev1.Secret, error)
|
||||
// Logger is the logger that the controller will use
|
||||
Logger *k3klog.Logger
|
||||
// objs are the objects that the syncer should watch/syncronize. Should only be manipulated
|
||||
// through add/remove
|
||||
objs sets.Set[types.NamespacedName]
|
||||
}
|
||||
|
||||
func (s *SecretSyncer) Name() string {
|
||||
return SecretSyncerName
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
if !s.isWatching(req.NamespacedName) {
|
||||
// return immediately without re-enqueueing. We aren't watching this resource
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
var virtual corev1.Secret
|
||||
|
||||
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtual); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to get secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translated, err := s.TranslateFunc(&virtual)
|
||||
if err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to translate secret %s/%s from virtual cluster: %w", req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
translatedKey := types.NamespacedName{
|
||||
Namespace: translated.Namespace,
|
||||
Name: translated.Name,
|
||||
}
|
||||
|
||||
var host corev1.Secret
|
||||
if err = s.HostClient.Get(ctx, translatedKey, &host); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = s.HostClient.Create(ctx, translated)
|
||||
// for simplicity's sake, we don't check for conflict errors. The existing object will get
|
||||
// picked up on in the next re-enqueue
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to create host secret %s/%s for virtual secret %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{Requeue: true}, fmt.Errorf("unable to get host secret %s/%s: %w", translated.Namespace, translated.Name, err)
|
||||
}
|
||||
// we are going to use the host in order to avoid conflicts on update
|
||||
host.Data = translated.Data
|
||||
if host.Labels == nil {
|
||||
host.Labels = make(map[string]string, len(translated.Labels))
|
||||
}
|
||||
// we don't want to override labels made on the host cluster by other applications
|
||||
// but we do need to make sure the labels that the kubelet uses to track host cluster values
|
||||
// are being tracked appropriately
|
||||
for key, value := range translated.Labels {
|
||||
host.Labels[key] = value
|
||||
}
|
||||
|
||||
if err = s.HostClient.Update(ctx, &host); err != nil {
|
||||
return reconcile.Result{
|
||||
Requeue: true,
|
||||
}, fmt.Errorf("unable to update host secret %s/%s for virtual secret %s/%s: %w",
|
||||
translated.Namespace, translated.Name, req.Namespace, req.Name, err)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// isWatching is a utility method to determine if a key is in objs without the caller needing
|
||||
// to handle mutex lock/unlock.
|
||||
func (s *SecretSyncer) isWatching(key types.NamespacedName) bool {
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
|
||||
return s.objs.Has(key)
|
||||
}
|
||||
|
||||
// AddResource adds a given resource to the list of resources that will be synced. Safe to call multiple times for the
|
||||
// same resource.
|
||||
func (s *SecretSyncer) AddResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
|
||||
// if we already sync this object, no need to writelock/add it
|
||||
if s.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// lock in write mode since we are now adding the key
|
||||
s.mutex.Lock()
|
||||
|
||||
if s.objs == nil {
|
||||
s.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
s.objs = s.objs.Insert(objKey)
|
||||
s.mutex.Unlock()
|
||||
|
||||
_, err := s.Reconcile(ctx, reconcile.Request{
|
||||
NamespacedName: objKey,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to reconcile new object %s/%s: %w", objKey.Namespace, objKey.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveResource removes a given resource from the list of resources that will be synced. Safe to call for an already
|
||||
// removed resource.
|
||||
func (s *SecretSyncer) RemoveResource(ctx context.Context, namespace, name string) error {
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
// if we don't sync this object, no need to writelock/add it
|
||||
if !s.isWatching(objKey) {
|
||||
return nil
|
||||
}
|
||||
// lock in write mode since we are now adding the key
|
||||
if err := retry.OnError(controller.Backoff, func(err error) bool {
|
||||
return err != nil
|
||||
}, func() error {
|
||||
return s.removeHostSecret(ctx, namespace, name)
|
||||
}); err != nil {
|
||||
return fmt.Errorf("unable to remove secret: %w", err)
|
||||
}
|
||||
|
||||
s.mutex.Lock()
|
||||
|
||||
if s.objs == nil {
|
||||
s.objs = sets.Set[types.NamespacedName]{}
|
||||
}
|
||||
|
||||
s.objs = s.objs.Delete(objKey)
|
||||
s.mutex.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SecretSyncer) removeHostSecret(ctx context.Context, virtualNamespace, virtualName string) error {
|
||||
var vSecret corev1.Secret
|
||||
|
||||
err := s.VirtualClient.Get(ctx, types.NamespacedName{
|
||||
Namespace: virtualNamespace,
|
||||
Name: virtualName,
|
||||
}, &vSecret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get virtual secret %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
translated, err := s.TranslateFunc(&vSecret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to translate virtual secret: %s/%s: %w", virtualNamespace, virtualName, err)
|
||||
}
|
||||
|
||||
return s.HostClient.Delete(ctx, translated)
|
||||
}
|
||||
k3k-kubelet/controller/syncer/configmap.go (new file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
configMapControllerName = "configmap-syncer"
|
||||
configMapFinalizerName = "configmap.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type ConfigMapSyncer struct {
|
||||
// SyncerContext contains all client information for host and virtual cluster
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) Name() string {
|
||||
return configMapControllerName
|
||||
}
|
||||
|
||||
// AddConfigMapSyncer adds configmap syncer controller to the manager of the virtual cluster
|
||||
func AddConfigMapSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
reconciler := ConfigMapSyncer{
|
||||
SyncerContext: &SyncerContext{
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, configMapControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&corev1.ConfigMap{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (c *ConfigMapSyncer) filterResources(object client.Object) bool {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for configMap Sync Config
|
||||
syncConfig := cluster.Spec.Sync.ConfigMaps
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", c.ClusterName, "clusterNamespace", c.ClusterName)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
var virtualConfigMap corev1.ConfigMap
|
||||
|
||||
if err := c.VirtualClient.Get(ctx, req.NamespacedName, &virtualConfigMap); err != nil {
|
||||
return reconcile.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedConfigMap := c.translateConfigMap(&virtualConfigMap)
|
||||
|
||||
// handle deletion
|
||||
if !virtualConfigMap.DeletionTimestamp.IsZero() {
|
||||
// deleting the synced configMap if exist
|
||||
if err := c.HostClient.Delete(ctx, syncedConfigMap); err != nil && !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced configMap
|
||||
if controllerutil.RemoveFinalizer(&virtualConfigMap, configMapFinalizerName) {
|
||||
if err := c.VirtualClient.Update(ctx, &virtualConfigMap); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtualConfigMap, configMapFinalizerName) {
|
||||
if err := c.VirtualClient.Update(ctx, &virtualConfigMap); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
var hostConfigMap corev1.ConfigMap
|
||||
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: syncedConfigMap.Name, Namespace: syncedConfigMap.Namespace}, &hostConfigMap); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the ConfigMap for the first time on the host cluster")
|
||||
return reconcile.Result{}, c.HostClient.Create(ctx, syncedConfigMap)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// TODO: Add option to keep labels/annotation set by the host cluster
|
||||
log.Info("updating ConfigMap on the host cluster")
|
||||
|
||||
return reconcile.Result{}, c.HostClient.Update(ctx, syncedConfigMap)
|
||||
}
|
||||
|
||||
// translateConfigMap translates a ConfigMap created in the virtual cluster
// into its host cluster counterpart.
|
||||
func (c *ConfigMapSyncer) translateConfigMap(configMap *corev1.ConfigMap) *corev1.ConfigMap {
|
||||
hostConfigMap := configMap.DeepCopy()
|
||||
c.Translator.TranslateTo(hostConfigMap)
|
||||
|
||||
return hostConfigMap
|
||||
}
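// Wiring sketch (assumed, not shown in this diff): every Add*Syncer in this package
// shares the signature (ctx, virtMgr, hostMgr, clusterName, clusterNamespace) error,
// so the virtual kubelet can presumably register them in one loop. registerSyncers is
// a hypothetical helper, not an existing function in this repository.
//
//	func registerSyncers(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
//		adders := []func(context.Context, manager.Manager, manager.Manager, string, string) error{
//			AddConfigMapSyncer, AddSecretSyncer, AddIngressSyncer,
//			AddPVCSyncer, AddPodPVCController, AddPriorityClassSyncer,
//		}
//		for _, add := range adders {
//			if err := add(ctx, virtMgr, hostMgr, clusterName, clusterNamespace); err != nil {
//				return err
//			}
//		}
//		return nil
//	}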
|
||||
243 k3k-kubelet/controller/syncer/configmap_test.go Normal file
@@ -0,0 +1,243 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var ConfigMapTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
ConfigMaps: v1alpha1.ConfigMapSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddConfigMapSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a ConfigMap on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Configmap %s in host cluster", hostConfigMapName))
|
||||
|
||||
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
|
||||
Expect(hostConfigMap.Labels).To(ContainElement("bar"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostConfigMap.Labels)
|
||||
})
|
||||
|
||||
It("updates a ConfigMap on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in host cluster", hostConfigMapName))
|
||||
|
||||
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
|
||||
Expect(hostConfigMap.Labels).NotTo(ContainElement("bar"))
|
||||
|
||||
key := client.ObjectKeyFromObject(configMap)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
configMap.Labels = map[string]string{"foo": "bar"}
|
||||
|
||||
// update virtual configmap
|
||||
err = virtTestEnv.k8sClient.Update(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(configMap.Labels).To(ContainElement("bar"))
|
||||
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, configMap)
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// check hostConfigMap
|
||||
Eventually(func() map[string]string {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostConfigMap.Labels
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(ContainElement("bar"))
|
||||
})
|
||||
|
||||
It("deletes a configMap on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in host cluster", hostConfigMapName))
|
||||
|
||||
Expect(hostConfigMap.Data).To(Equal(configMap.Data))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
It("will not sync a configMap if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.ConfigMaps.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cm-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, configMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created configmap %s in virtual cluster", configMap.Name))
|
||||
|
||||
var hostConfigMap v1.ConfigMap
|
||||
hostConfigMapName := translateName(cluster, configMap.Namespace, configMap.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostConfigMapName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostConfigMap)
|
||||
GinkgoWriter.Printf("error: %v", err)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
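// How this block is presumably consumed (the Ginkgo suite file is not part of this
// excerpt): ConfigMapTests, like the other *Tests variables in this package, is a
// plain func() so it can be registered from a shared suite, e.g.
//
//	var _ = Describe("ConfigMap Syncer", ConfigMapTests)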
|
||||
161 k3k-kubelet/controller/syncer/ingress.go Normal file
@@ -0,0 +1,161 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
ingressControllerName = "ingress-syncer-controller"
|
||||
ingressFinalizerName = "ingress.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type IngressReconciler struct {
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddIngressSyncer adds the Ingress syncer controller to the manager of the virtual cluster
|
||||
func AddIngressSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
reconciler := IngressReconciler{
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, ingressControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&networkingv1.Ingress{}).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *IngressReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for ingressConfig
|
||||
syncConfig := cluster.Spec.Sync.Ingresses
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
log.Info("reconciling ingress object")
|
||||
|
||||
var (
|
||||
virtIngress networkingv1.Ingress
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtIngress); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedIngress := r.ingress(&virtIngress)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedIngress, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtIngress.DeletionTimestamp.IsZero() {
|
||||
// deleting the synced Ingress if it exists
|
||||
if err := r.HostClient.Delete(ctx, syncedIngress); err != nil && !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced Ingress
|
||||
if controllerutil.RemoveFinalizer(&virtIngress, ingressFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtIngress); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
|
||||
if controllerutil.AddFinalizer(&virtIngress, ingressFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtIngress); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// create or update the ingress on host
|
||||
var hostIngress networkingv1.Ingress
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: syncedIngress.Name, Namespace: r.ClusterNamespace}, &hostIngress); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the ingress for the first time on the host cluster")
|
||||
return reconcile.Result{}, r.HostClient.Create(ctx, syncedIngress)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
log.Info("updating ingress on the host cluster")
|
||||
|
||||
return reconcile.Result{}, r.HostClient.Update(ctx, syncedIngress)
|
||||
}
|
||||
|
||||
func (s *IngressReconciler) ingress(obj *networkingv1.Ingress) *networkingv1.Ingress {
|
||||
hostIngress := obj.DeepCopy()
|
||||
s.Translator.TranslateTo(hostIngress)
|
||||
|
||||
for _, rule := range hostIngress.Spec.Rules {
|
||||
// modify services in rules to point to the synced services
|
||||
if rule.HTTP != nil {
|
||||
for _, path := range rule.HTTP.Paths {
|
||||
if path.Backend.Service != nil {
|
||||
path.Backend.Service.Name = s.Translator.TranslateName(obj.GetNamespace(), path.Backend.Service.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// don't sync finalizers to the host
|
||||
return hostIngress
|
||||
}
|
||||
349 k3k-kubelet/controller/syncer/ingress_test.go Normal file
@@ -0,0 +1,349 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var IngressTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
Ingresses: v1alpha1.IngressSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddIngressSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a Ingress on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
|
||||
|
||||
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
|
||||
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostIngress.Labels)
|
||||
})
|
||||
|
||||
It("updates a Ingress on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
|
||||
|
||||
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
|
||||
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
|
||||
|
||||
key := client.ObjectKeyFromObject(ingress)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
ingress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name = "test-service-updated"
|
||||
|
||||
// update virtual ingress
|
||||
err = virtTestEnv.k8sClient.Update(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// check hostIngress
|
||||
Eventually(func() string {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(Equal(translateName(cluster, ingress.Namespace, "test-service-updated")))
|
||||
})
|
||||
|
||||
It("deletes a Ingress on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in host cluster", hostIngressName))
|
||||
|
||||
Expect(len(hostIngress.Spec.Rules)).To(Equal(1))
|
||||
Expect(hostIngress.Spec.Rules[0].Host).To(Equal("test.com"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/"))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name).To(Equal(translateName(cluster, ingress.Namespace, "test-service")))
|
||||
Expect(hostIngress.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Port.Name).To(Equal("test-port"))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
|
||||
It("will not sync an Ingress if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.Ingresses.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
ingress := &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "ingress-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "test.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: ptr.To(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Name: "test-port",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, ingress)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Ingress %s in virtual cluster", ingress.Name))
|
||||
|
||||
var hostIngress networkingv1.Ingress
|
||||
hostIngressName := translateName(cluster, ingress.Namespace, ingress.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostIngressName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostIngress)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
138 k3k-kubelet/controller/syncer/persistentvolumeclaims.go Normal file
@@ -0,0 +1,138 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
pvcControllerName = "pvc-syncer-controller"
|
||||
pvcFinalizerName = "pvc.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type PVCReconciler struct {
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddPVCSyncer adds the PersistentVolumeClaim syncer controller to the k3k-kubelet
|
||||
func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
reconciler := PVCReconciler{
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, pvcControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&v1.PersistentVolumeClaim{}).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for pvc config
|
||||
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var (
|
||||
virtPVC v1.PersistentVolumeClaim
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedPVC := r.pvc(&virtPVC)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtPVC.DeletionTimestamp.IsZero() {
|
||||
// deleting the synced PVC if it exists
|
||||
if err := r.HostClient.Delete(ctx, syncedPVC); err != nil && !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// remove the finalizer after cleaning up the synced pvc
|
||||
if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// create the pvc on host
|
||||
log.Info("creating the persistent volume claim for the first time on the host cluster")
|
||||
|
||||
// note that we don't need to update the PVC on the host cluster; we only sync
// it so that it can be handled by the host cluster.
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.HostClient.Create(ctx, syncedPVC))
|
||||
}
|
||||
|
||||
func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
|
||||
hostPVC := obj.DeepCopy()
|
||||
r.Translator.TranslateTo(hostPVC)
|
||||
|
||||
return hostPVC
|
||||
}
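// Design note (inferred from the code above): PVCs are synced create-only; the host
// cluster's storage controllers take over from there, while the pod-pvc controller
// in pod.go creates a pseudo PersistentVolume on the virtual side and marks the
// claim Bound so that workloads in the virtual cluster can proceed.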
|
||||
111 k3k-kubelet/controller/syncer/persistentvolumeclaims_test.go Normal file
@@ -0,0 +1,111 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var PVCTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
PersistentVolumeClaims: v1alpha1.PersistentVolumeClaimSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddPVCSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a pvc on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
pvc := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
StorageClassName: ptr.To("test-sc"),
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
Resources: v1.VolumeResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"storage": resource.MustParse("1G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, pvc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created PVC %s in virtual cluster", pvc.Name))
|
||||
|
||||
var hostPVC v1.PersistentVolumeClaim
|
||||
hostPVCName := translateName(cluster, pvc.Namespace, pvc.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostPVCName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostPVC)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created PVC %s in host cluster", hostPVCName))
|
||||
|
||||
Expect(*hostPVC.Spec.StorageClassName).To(Equal("test-sc"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostPVC.Labels)
|
||||
})
|
||||
}
|
||||
@@ -1,12 +1,12 @@
|
||||
package controller
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/component-helpers/storage/volume"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -19,48 +19,54 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
podController = "pod-pvc-controller"
|
||||
pseudoPVLabel = "pod.k3k.io/pseudoPV"
|
||||
podControllerName = "pod-pvc-controller"
|
||||
pseudoPVLabel = "pod.k3k.io/pseudoPV"
|
||||
)
|
||||
|
||||
type PodReconciler struct {
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
Translator translate.ToHostTranslator
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddPodPVCController adds pod controller to k3k-kubelet
|
||||
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
// initialize a new Reconciler
|
||||
reconciler := PodReconciler{
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
Translator: translator,
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{},
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, podControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(podController).
|
||||
Named(name).
|
||||
For(&v1.Pod{}).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for pvc config
|
||||
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
|
||||
|
||||
// If PVC syncing is disabled, only process deletions to allow for cleanup.
|
||||
return syncConfig.Enabled || object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var (
|
||||
@@ -68,11 +74,11 @@ func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
@@ -103,14 +109,14 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, key, &pvc); err != nil {
|
||||
if err := r.VirtualClient.Get(ctx, key, &pvc); err != nil {
|
||||
return ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
log.Info("Creating pseudo Persistent Volume")
|
||||
|
||||
pv := r.pseudoPV(&pvc)
|
||||
if err := r.virtualClient.Create(ctx, pv); err != nil {
|
||||
if err := r.VirtualClient.Create(ctx, pv); err != nil {
|
||||
return ctrlruntimeclient.IgnoreAlreadyExists(err)
|
||||
}
|
||||
|
||||
@@ -119,7 +125,7 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
|
||||
Phase: v1.VolumeBound,
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
|
||||
if err := r.VirtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -135,7 +141,7 @@ func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pv
|
||||
pvcPatch.Status.Phase = v1.ClaimBound
|
||||
pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes
|
||||
|
||||
return r.virtualClient.Status().Update(ctx, pvcPatch)
|
||||
return r.VirtualClient.Status().Update(ctx, pvcPatch)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {
|
||||
@@ -1,4 +1,4 @@
|
||||
package controller_test
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -12,8 +12,7 @@ import (
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller"
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
@@ -42,11 +41,18 @@ var PriorityClassTests = func() {
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
PriorityClasses: v1alpha1.PriorityClassSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = controller.AddPriorityClassReconciler(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
err = syncer.AddPriorityClassSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
@@ -215,15 +221,36 @@ var PriorityClassTests = func() {
|
||||
|
||||
Expect(hostPriorityClass.Value).To(Equal(priorityClass.Value))
|
||||
Expect(hostPriorityClass.GlobalDefault).To(BeFalse())
|
||||
Expect(hostPriorityClass.Annotations[controller.PriorityClassGlobalDefaultAnnotation]).To(Equal("true"))
|
||||
Expect(hostPriorityClass.Annotations[syncer.PriorityClassGlobalDefaultAnnotation]).To(Equal("true"))
|
||||
})
|
||||
|
||||
It("will not create a priorityClass on the host cluster if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.PriorityClasses.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
priorityClass := &schedulingv1.PriorityClass{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "pc-"},
|
||||
Value: 1001,
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, priorityClass)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created priorityClass %s in virtual cluster", priorityClass.Name))
|
||||
|
||||
var hostPriorityClass schedulingv1.PriorityClass
|
||||
hostPriorityClassName := translateName(cluster, priorityClass.Namespace, priorityClass.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostPriorityClassName}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostPriorityClass)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
|
||||
func translateName(cluster v1alpha1.Cluster, namespace, name string) string {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: cluster.Name,
|
||||
ClusterNamespace: cluster.Namespace,
|
||||
}
|
||||
|
||||
return translator.TranslateName(namespace, name)
|
||||
}
|
||||
@@ -1,10 +1,10 @@
|
||||
package controller
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
@@ -28,42 +28,32 @@ const (
|
||||
priorityClassFinalizerName = "priorityclass.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type PriorityClassReconciler struct {
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
Translator translate.ToHostTranslator
|
||||
type PriorityClassSyncer struct {
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddPriorityClassReconciler adds a PriorityClass reconciler to k3k-kubelet
|
||||
func AddPriorityClassReconciler(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
}
|
||||
|
||||
// AddPriorityClassSyncer adds the PriorityClass syncer to k3k-kubelet
|
||||
func AddPriorityClassSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
// initialize a new Reconciler
|
||||
reconciler := PriorityClassReconciler{
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
Translator: translator,
|
||||
reconciler := PriorityClassSyncer{
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
name := translator.TranslateName("", priorityClassControllerName)
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, priorityClassControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&schedulingv1.PriorityClass{}).
|
||||
WithEventFilter(ignoreSystemPrefixPredicate).
|
||||
For(&schedulingv1.PriorityClass{}).WithEventFilter(ignoreSystemPrefixPredicate).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
@@ -83,8 +73,33 @@ var ignoreSystemPrefixPredicate = predicate.Funcs{
|
||||
},
|
||||
}
|
||||
|
||||
func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
|
||||
func (r *PriorityClassSyncer) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for priorityClassConfig
|
||||
syncConfig := cluster.Spec.Sync.PriorityClasses
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var (
|
||||
@@ -92,11 +107,11 @@ func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.R
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &priorityClass); err != nil {
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &priorityClass); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
@@ -106,13 +121,13 @@ func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.R
|
||||
if !priorityClass.DeletionTimestamp.IsZero() {
|
||||
// deleting the synced service if exists
|
||||
// TODO add test for previous implementation without err != nil check, and also check the other controllers
|
||||
if err := r.hostClient.Delete(ctx, hostPriorityClass); err != nil && !apierrors.IsNotFound(err) {
|
||||
if err := r.HostClient.Delete(ctx, hostPriorityClass); err != nil && !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced service
|
||||
if controllerutil.RemoveFinalizer(&priorityClass, priorityClassFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &priorityClass); err != nil {
|
||||
if err := r.VirtualClient.Update(ctx, &priorityClass); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
@@ -122,7 +137,7 @@ func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.R
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&priorityClass, priorityClassFinalizerName) {
|
||||
if err := r.virtualClient.Update(ctx, &priorityClass); err != nil {
|
||||
if err := r.VirtualClient.Update(ctx, &priorityClass); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
@@ -130,19 +145,19 @@ func (r *PriorityClassReconciler) Reconcile(ctx context.Context, req reconcile.R
|
||||
// create the priorityClass on the host
|
||||
log.Info("creating the priorityClass for the first time on the host cluster")
|
||||
|
||||
err := r.hostClient.Create(ctx, hostPriorityClass)
|
||||
err := r.HostClient.Create(ctx, hostPriorityClass)
|
||||
if err != nil {
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
return reconcile.Result{}, r.hostClient.Update(ctx, hostPriorityClass)
|
||||
return reconcile.Result{}, r.HostClient.Update(ctx, hostPriorityClass)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *PriorityClassReconciler) translatePriorityClass(priorityClass schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
|
||||
func (r *PriorityClassSyncer) translatePriorityClass(priorityClass schedulingv1.PriorityClass) *schedulingv1.PriorityClass {
|
||||
hostPriorityClass := priorityClass.DeepCopy()
|
||||
r.Translator.TranslateTo(hostPriorityClass)
|
||||
|
||||
155 k3k-kubelet/controller/syncer/secret.go Normal file
@@ -0,0 +1,155 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
secretControllerName = "secret-syncer"
|
||||
secretFinalizerName = "secret.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type SecretSyncer struct {
|
||||
// SyncerContext contains all client information for the host and virtual clusters
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
func (s *SecretSyncer) Name() string {
|
||||
return secretControllerName
|
||||
}
|
||||
|
||||
// AddSecretSyncer adds the Secret syncer controller to the manager of the virtual cluster
|
||||
func AddSecretSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
reconciler := SecretSyncer{
|
||||
SyncerContext: &SyncerContext{
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, secretControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&v1.Secret{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *SecretSyncer) filterResources(object client.Object) bool {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for Secrets Sync Config
|
||||
syncConfig := cluster.Spec.Sync.Secrets
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
// Reconcile implements reconcile.Reconciler and synchronizes the objects in objs to the host cluster
|
||||
func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", s.ClusterName, "clusterNamespace", s.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
if err := s.HostClient.Get(ctx, types.NamespacedName{Name: s.ClusterName, Namespace: s.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
var virtualSecret v1.Secret
|
||||
|
||||
if err := s.VirtualClient.Get(ctx, req.NamespacedName, &virtualSecret); err != nil {
|
||||
return reconcile.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedSecret := s.translateSecret(&virtualSecret)
|
||||
|
||||
// handle deletion
|
||||
if !virtualSecret.DeletionTimestamp.IsZero() {
|
||||
// deleting the synced Secret if it exists
|
||||
if err := s.HostClient.Delete(ctx, syncedSecret); err != nil && !apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced secret
|
||||
if controllerutil.RemoveFinalizer(&virtualSecret, secretFinalizerName) {
|
||||
if err := s.VirtualClient.Update(ctx, &virtualSecret); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if controllerutil.AddFinalizer(&virtualSecret, secretFinalizerName) {
|
||||
if err := s.VirtualClient.Update(ctx, &virtualSecret); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
var hostSecret v1.Secret
|
||||
if err := s.HostClient.Get(ctx, types.NamespacedName{Name: syncedSecret.Name, Namespace: syncedSecret.Namespace}, &hostSecret); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the Secret for the first time on the host cluster")
|
||||
return reconcile.Result{}, s.HostClient.Create(ctx, syncedSecret)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// TODO: Add option to keep labels/annotation set by the host cluster
|
||||
log.Info("updating Secret on the host cluster")
|
||||
|
||||
return reconcile.Result{}, s.HostClient.Update(ctx, syncedSecret)
|
||||
}
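One detail worth calling out in the reconcile loop above (and in the service syncer change further down) is that controller-runtime's controllerutil.AddFinalizer and RemoveFinalizer return true only when they actually modified the object, which lets the reconciler skip redundant Update calls. A hedged, standalone sketch of that pattern; the finalizer name and helper functions are illustrative and not part of k3k:

```go
package finalizerexample

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// exampleFinalizer is a hypothetical finalizer name used only for illustration.
const exampleFinalizer = "example.k3k.io/finalizer"

// ensureFinalizer adds the finalizer and persists the object only if the set changed.
func ensureFinalizer(ctx context.Context, c client.Client, obj client.Object) error {
	if controllerutil.AddFinalizer(obj, exampleFinalizer) {
		return c.Update(ctx, obj)
	}
	return nil
}

// clearFinalizer removes the finalizer and persists the object only if the set changed.
func clearFinalizer(ctx context.Context, c client.Client, obj client.Object) error {
	if controllerutil.RemoveFinalizer(obj, exampleFinalizer) {
		return c.Update(ctx, obj)
	}
	return nil
}
```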
|
||||
|
||||
// translateSecret translates a Secret created in the virtual cluster
// into its host cluster representation
|
||||
func (s *SecretSyncer) translateSecret(secret *v1.Secret) *v1.Secret {
|
||||
hostSecret := secret.DeepCopy()
|
||||
|
||||
if hostSecret.Type == v1.SecretTypeServiceAccountToken {
|
||||
hostSecret.Type = v1.SecretTypeOpaque
|
||||
}
|
||||
|
||||
s.Translator.TranslateTo(hostSecret)
|
||||
|
||||
return hostSecret
|
||||
}
|
||||
240
k3k-kubelet/controller/syncer/secret_test.go
Normal file
@@ -0,0 +1,240 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var SecretTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
Secrets: v1alpha1.SecretSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddSecretSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a Secret on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created Secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Secret %s in host cluster", hostSecretName))
|
||||
|
||||
Expect(hostSecret.Data).To(Equal(secret.Data))
|
||||
Expect(hostSecret.Labels).To(ContainElement("bar"))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostSecret.Labels)
|
||||
})
|
||||
|
||||
It("updates a Secret on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in host cluster", hostSecretName))
|
||||
|
||||
Expect(hostSecret.Data).To(Equal(secret.Data))
|
||||
Expect(hostSecret.Labels).NotTo(ContainElement("bar"))
|
||||
|
||||
key := client.ObjectKeyFromObject(secret)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
secret.Labels = map[string]string{"foo": "bar"}
|
||||
|
||||
// update virtual secret
|
||||
err = virtTestEnv.k8sClient.Update(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(secret.Labels).To(ContainElement("bar"))
|
||||
|
||||
// check hostSecret
|
||||
Eventually(func() map[string]string {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostSecret.Labels
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(ContainElement("bar"))
|
||||
})
|
||||
|
||||
It("deletes a secret on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in host cluster", hostSecretName))
|
||||
|
||||
Expect(hostSecret.Data).To(Equal(secret.Data))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
It("will not create a secret on the host cluster if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.Secrets.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "secret-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"foo": []byte("bar"),
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created secret %s in virtual cluster", secret.Name))
|
||||
|
||||
var hostSecret v1.Secret
|
||||
hostSecretName := translateName(cluster, secret.Namespace, secret.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostSecretName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostSecret)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
@@ -1,12 +1,13 @@
|
||||
package controller
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -19,19 +20,12 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
serviceSyncerController = "service-syncer-controller"
|
||||
serviceFinalizerName = "service.k3k.io/finalizer"
|
||||
serviceControllerName = "service-syncer-controller"
|
||||
serviceFinalizerName = "service.k3k.io/finalizer"
|
||||
)
|
||||
|
||||
type ServiceReconciler struct {
|
||||
clusterName string
|
||||
clusterNamespace string
|
||||
|
||||
virtualClient ctrlruntimeclient.Client
|
||||
hostClient ctrlruntimeclient.Client
|
||||
Scheme *runtime.Scheme
|
||||
HostScheme *runtime.Scheme
|
||||
Translator translate.ToHostTranslator
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddServiceSyncer adds the service syncer controller to the manager of the virtual cluster
|
||||
@@ -42,24 +36,25 @@ func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clu
|
||||
}
|
||||
|
||||
reconciler := ServiceReconciler{
|
||||
clusterName: clusterName,
|
||||
clusterNamespace: clusterNamespace,
|
||||
|
||||
virtualClient: virtMgr.GetClient(),
|
||||
hostClient: hostMgr.GetClient(),
|
||||
Scheme: virtMgr.GetScheme(),
|
||||
HostScheme: hostMgr.GetScheme(),
|
||||
Translator: translator,
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translator,
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, serviceControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(serviceSyncerController).
|
||||
For(&v1.Service{}).
|
||||
Named(name).
|
||||
For(&v1.Service{}).WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace)
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
if req.Name == "kubernetes" || req.Name == "kube-dns" {
|
||||
@@ -71,31 +66,29 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil {
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
syncedService := r.service(&virtService)
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostScheme); err != nil {
|
||||
if err := controllerutil.SetControllerReference(&cluster, syncedService, r.HostClient.Scheme()); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// handle deletion
|
||||
if !virtService.DeletionTimestamp.IsZero() {
|
||||
// delete the synced service if it exists
|
||||
if err := r.hostClient.Delete(ctx, syncedService); err != nil {
|
||||
if err := r.HostClient.Delete(ctx, syncedService); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// remove the finalizer after cleaning up the synced service
|
||||
if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
|
||||
controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName)
|
||||
|
||||
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
|
||||
if controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtService); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
@@ -104,20 +97,18 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
}
|
||||
|
||||
// Add finalizer if it does not exist
|
||||
if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) {
|
||||
controllerutil.AddFinalizer(&virtService, serviceFinalizerName)
|
||||
|
||||
if err := r.virtualClient.Update(ctx, &virtService); err != nil {
|
||||
if controllerutil.AddFinalizer(&virtService, serviceFinalizerName) {
|
||||
if err := r.VirtualClient.Update(ctx, &virtService); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// create or update the service on host
|
||||
var hostService v1.Service
|
||||
if err := r.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: r.clusterNamespace}, &hostService); err != nil {
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: r.ClusterNamespace}, &hostService); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
log.Info("creating the service for the first time on the host cluster")
|
||||
return reconcile.Result{}, r.hostClient.Create(ctx, syncedService)
|
||||
return reconcile.Result{}, r.HostClient.Create(ctx, syncedService)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
@@ -125,7 +116,32 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
||||
|
||||
log.Info("updating service on the host cluster")
|
||||
|
||||
return reconcile.Result{}, r.hostClient.Update(ctx, syncedService)
|
||||
return reconcile.Result{}, r.HostClient.Update(ctx, syncedService)
|
||||
}
|
||||
|
||||
func (r *ServiceReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for serviceSyncConfig
|
||||
syncConfig := cluster.Spec.Sync.Services
|
||||
|
||||
// If syncing is disabled, only process deletions to allow for cleanup.
|
||||
if !syncConfig.Enabled {
|
||||
return object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
labelSelector := labels.SelectorFromSet(syncConfig.Selector)
|
||||
if labelSelector.Empty() {
|
||||
return true
|
||||
}
|
||||
|
||||
return labelSelector.Matches(labels.Set(object.GetLabels()))
|
||||
}
|
||||
|
||||
func (s *ServiceReconciler) service(obj *v1.Service) *v1.Service {
|
||||
276
k3k-kubelet/controller/syncer/service_test.go
Normal file
@@ -0,0 +1,276 @@
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var ServiceTests = func() {
|
||||
var (
|
||||
namespace string
|
||||
cluster v1alpha1.Cluster
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
ns := v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"},
|
||||
}
|
||||
err := hostTestEnv.k8sClient.Create(ctx, &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
namespace = ns.Name
|
||||
|
||||
cluster = v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Sync: &v1alpha1.SyncConfig{
|
||||
Services: v1alpha1.ServiceSyncConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
err = hostTestEnv.k8sClient.Create(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = syncer.AddServiceSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
|
||||
err := hostTestEnv.k8sClient.Delete(context.Background(), &ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("creates a service on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Service %s in host cluster", hostServiceName))
|
||||
|
||||
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
|
||||
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
|
||||
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
|
||||
|
||||
GinkgoWriter.Printf("labels: %v\n", hostService.Labels)
|
||||
})
|
||||
|
||||
It("updates a service on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created Service %s in host cluster", hostServiceName))
|
||||
|
||||
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
|
||||
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
|
||||
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
|
||||
|
||||
key := client.ObjectKeyFromObject(service)
|
||||
err = virtTestEnv.k8sClient.Get(ctx, key, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
service.Spec.Ports[0].Name = "test-port-updated"
|
||||
|
||||
// update virtual service
|
||||
err = virtTestEnv.k8sClient.Update(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// check hostService
|
||||
Eventually(func() string {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return hostService.Spec.Ports[0].Name
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(Equal("test-port-updated"))
|
||||
})
|
||||
|
||||
It("deletes a service on the host cluster", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() error {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
return hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeNil())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in host cluster", hostServiceName))
|
||||
|
||||
Expect(hostService.Spec.Type).To(Equal(v1.ServiceTypeNodePort))
|
||||
Expect(hostService.Spec.Ports[0].Name).To(Equal("test-port"))
|
||||
Expect(hostService.Spec.Ports[0].Port).To(Equal(int32(8888)))
|
||||
|
||||
err = virtTestEnv.k8sClient.Delete(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
err := hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
|
||||
It("will not create a service on the host cluster if disabled", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
cluster.Spec.Sync.Services.Enabled = false
|
||||
err := hostTestEnv.k8sClient.Update(ctx, &cluster)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "service-",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8888,
|
||||
TargetPort: intstr.FromInt32(8888),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err = virtTestEnv.k8sClient.Create(ctx, service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Created service %s in virtual cluster", service.Name))
|
||||
|
||||
var hostService v1.Service
|
||||
hostServiceName := translateName(cluster, service.Namespace, service.Name)
|
||||
|
||||
Eventually(func() bool {
|
||||
key := client.ObjectKey{Name: hostServiceName, Namespace: namespace}
|
||||
err = hostTestEnv.k8sClient.Get(ctx, key, &hostService)
|
||||
return apierrors.IsNotFound(err)
|
||||
}).
|
||||
WithPolling(time.Millisecond * 300).
|
||||
WithTimeout(time.Second * 10).
|
||||
Should(BeTrue())
|
||||
})
|
||||
}
|
||||
15
k3k-kubelet/controller/syncer/syncer.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
)
|
||||
|
||||
type SyncerContext struct {
|
||||
ClusterName string
|
||||
ClusterNamespace string
|
||||
VirtualClient client.Client
|
||||
HostClient client.Client
|
||||
Translator translate.ToHostTranslator
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package controller_test
|
||||
package syncer_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
@@ -91,7 +92,7 @@ func NewTestEnv() *TestEnv {
|
||||
By("bootstrapping test environment")
|
||||
|
||||
testEnv := &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "charts", "k3k", "crds")},
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
|
||||
ErrorIfCRDPathMissing: true,
|
||||
BinaryAssetsDirectory: tempDir,
|
||||
Scheme: buildScheme(),
|
||||
@@ -165,5 +166,19 @@ var _ = Describe("Kubelet Controller", func() {
|
||||
cancel()
|
||||
})
|
||||
|
||||
Describe("PriorityClass", PriorityClassTests)
|
||||
Describe("PriorityClass Syncer", PriorityClassTests)
|
||||
Describe("ConfigMap Syncer", ConfigMapTests)
|
||||
Describe("Secret Syncer", SecretTests)
|
||||
Describe("Service Syncer", ServiceTests)
|
||||
Describe("Ingress Syncer", IngressTests)
|
||||
Describe("PersistentVolumeClaim Syncer", PVCTests)
|
||||
})
|
||||
|
||||
func translateName(cluster v1alpha1.Cluster, namespace, name string) string {
|
||||
translator := translate.ToHostTranslator{
|
||||
ClusterName: cluster.Name,
|
||||
ClusterNamespace: cluster.Namespace,
|
||||
}
|
||||
|
||||
return translator.TranslateName(namespace, name)
|
||||
}
|
||||
@@ -35,7 +35,7 @@ import (
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
ctrlserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
|
||||
k3kkubeletcontroller "github.com/rancher/k3k/k3k-kubelet/controller"
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
|
||||
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
|
||||
"github.com/rancher/k3k/k3k-kubelet/provider"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
@@ -156,28 +156,8 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
|
||||
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding service syncer controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return nil, errors.New("failed to add service syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pvc syncer controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return nil, errors.New("failed to add pvc syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod pvc controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return nil, errors.New("failed to add pod pvc controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding priorityclass controller")
|
||||
|
||||
if err := k3kkubeletcontroller.AddPriorityClassReconciler(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return nil, errors.New("failed to add priorityclass controller: " + err.Error())
|
||||
if err := addControllers(ctx, hostMgr, virtualMgr, c, hostClient); err != nil {
|
||||
return nil, errors.New("failed to add controller: " + err.Error())
|
||||
}
|
||||
|
||||
clusterIP, err := clusterIP(ctx, c.ServiceName, c.ClusterNamespace, hostClient)
|
||||
@@ -447,3 +427,56 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
|
||||
Certificates: []tls.Certificate{clientCert},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func addControllers(ctx context.Context, hostMgr, virtualMgr manager.Manager, c *config, hostClient ctrlruntimeclient.Client) error {
|
||||
var cluster v1alpha1.Cluster
|
||||
|
||||
objKey := types.NamespacedName{
|
||||
Namespace: c.ClusterNamespace,
|
||||
Name: c.ClusterName,
|
||||
}
|
||||
|
||||
if err := hostClient.Get(ctx, objKey, &cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := syncer.AddConfigMapSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add configmap global syncer: " + err.Error())
|
||||
}
|
||||
|
||||
if err := syncer.AddSecretSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add secret global syncer: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding service syncer controller")
|
||||
|
||||
if err := syncer.AddServiceSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add service syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding ingress syncer controller")
|
||||
|
||||
if err := syncer.AddIngressSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add ingress syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pvc syncer controller")
|
||||
|
||||
if err := syncer.AddPVCSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add pvc syncer controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding pod pvc controller")
|
||||
|
||||
if err := syncer.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add pod pvc controller: " + err.Error())
|
||||
}
|
||||
|
||||
logger.Info("adding priorityclass controller")
|
||||
|
||||
if err := syncer.AddPriorityClassSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
|
||||
return errors.New("failed to add priorityclass controller: " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
@@ -32,13 +31,11 @@ import (
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller"
|
||||
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
|
||||
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
@@ -53,10 +50,10 @@ var _ nodeutil.Provider = (*Provider)(nil)
|
||||
// Provider implements nodetuil.Provider from virtual Kubelet.
|
||||
// TODO: Implement NotifyPods and the required usage so that this can be an async provider
|
||||
type Provider struct {
|
||||
Handler controller.ControllerHandler
|
||||
Translator translate.ToHostTranslator
|
||||
HostClient client.Client
|
||||
VirtualClient client.Client
|
||||
VirtualManager manager.Manager
|
||||
ClientConfig rest.Config
|
||||
CoreClient cv1.CoreV1Interface
|
||||
ClusterNamespace string
|
||||
@@ -80,16 +77,9 @@ func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3
|
||||
}
|
||||
|
||||
p := Provider{
|
||||
Handler: controller.ControllerHandler{
|
||||
Mgr: virtualMgr,
|
||||
Scheme: *virtualMgr.GetScheme(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
VirtualClient: virtualMgr.GetClient(),
|
||||
Translator: translator,
|
||||
Logger: logger,
|
||||
},
|
||||
HostClient: hostMgr.GetClient(),
|
||||
VirtualClient: virtualMgr.GetClient(),
|
||||
VirtualManager: virtualMgr,
|
||||
Translator: translator,
|
||||
ClientConfig: hostConfig,
|
||||
CoreClient: coreClient,
|
||||
@@ -340,7 +330,14 @@ func (p *Provider) CreatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
|
||||
// createPod takes a Kubernetes Pod and deploys it within the provider.
|
||||
func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
tPod := pod.DeepCopy()
|
||||
// fieldPath envs are not being translated correctly using the virtual kubelet pod controller
|
||||
// as a workaround we will try to fetch the pod from the virtual cluster and copy over the envSource
|
||||
var sourcePod corev1.Pod
|
||||
if err := p.VirtualClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &sourcePod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tPod := sourcePod.DeepCopy()
|
||||
p.Translator.TranslateTo(tPod)
|
||||
|
||||
// get Cluster definition
|
||||
@@ -385,12 +382,12 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
}
|
||||
|
||||
// fieldpath annotations
|
||||
if err := p.configureFieldPathEnv(pod, tPod); err != nil {
|
||||
if err := p.configureFieldPathEnv(&sourcePod, tPod); err != nil {
|
||||
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// volumes will often refer to resources in the virtual cluster, but instead need to refer to the sync'd
|
||||
// host cluster version
|
||||
if err := p.transformVolumes(ctx, pod.Namespace, tPod.Spec.Volumes); err != nil {
|
||||
if err := p.transformVolumes(pod.Namespace, tPod.Spec.Volumes); err != nil {
|
||||
return fmt.Errorf("unable to sync volumes for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// sync the serviceaccount token to the host cluster
|
||||
@@ -398,6 +395,10 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
for i, imagePullSecret := range tPod.Spec.ImagePullSecrets {
|
||||
tPod.Spec.ImagePullSecrets[i].Name = p.Translator.TranslateName(pod.Namespace, imagePullSecret.Name)
|
||||
}
|
||||
|
||||
// inject networking information to the pod including the virtual cluster controlplane endpoint
|
||||
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
|
||||
|
||||
@@ -443,58 +444,22 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev
|
||||
|
||||
// transformVolumes changes the volumes to their representation in the host cluster. It returns an error
// if one or more volumes couldn't be transformed
|
||||
func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, volumes []corev1.Volume) error {
|
||||
func (p *Provider) transformVolumes(podNamespace string, volumes []corev1.Volume) error {
|
||||
for _, volume := range volumes {
|
||||
var optional bool
|
||||
|
||||
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
|
||||
continue
|
||||
}
|
||||
// note: this needs to handle downward api volumes as well, but more thought is needed on how to do that
|
||||
if volume.ConfigMap != nil {
|
||||
if volume.ConfigMap.Optional != nil {
|
||||
optional = *volume.ConfigMap.Optional
|
||||
}
|
||||
|
||||
if err := p.syncConfigmap(ctx, podNamespace, volume.ConfigMap.Name, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync configmap volume %s: %w", volume.Name, err)
|
||||
}
|
||||
|
||||
volume.ConfigMap.Name = p.Translator.TranslateName(podNamespace, volume.ConfigMap.Name)
|
||||
} else if volume.Secret != nil {
|
||||
if volume.Secret.Optional != nil {
|
||||
optional = *volume.Secret.Optional
|
||||
}
|
||||
|
||||
if err := p.syncSecret(ctx, podNamespace, volume.Secret.SecretName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync secret volume %s: %w", volume.Name, err)
|
||||
}
|
||||
|
||||
volume.Secret.SecretName = p.Translator.TranslateName(podNamespace, volume.Secret.SecretName)
|
||||
} else if volume.Projected != nil {
|
||||
for _, source := range volume.Projected.Sources {
|
||||
if source.ConfigMap != nil {
|
||||
if source.ConfigMap.Optional != nil {
|
||||
optional = *source.ConfigMap.Optional
|
||||
}
|
||||
|
||||
configMapName := source.ConfigMap.Name
|
||||
if err := p.syncConfigmap(ctx, podNamespace, configMapName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync projected configmap %s: %w", configMapName, err)
|
||||
}
|
||||
|
||||
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, configMapName)
|
||||
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, source.ConfigMap.Name)
|
||||
} else if source.Secret != nil {
|
||||
if source.Secret.Optional != nil {
|
||||
optional = *source.Secret.Optional
|
||||
}
|
||||
|
||||
secretName := source.Secret.Name
|
||||
if err := p.syncSecret(ctx, podNamespace, secretName, optional); err != nil {
|
||||
return fmt.Errorf("unable to sync projected secret %s: %w", secretName, err)
|
||||
}
|
||||
|
||||
source.Secret.Name = p.Translator.TranslateName(podNamespace, secretName)
|
||||
source.Secret.Name = p.Translator.TranslateName(podNamespace, source.Secret.Name)
|
||||
}
|
||||
}
|
||||
} else if volume.PersistentVolumeClaim != nil {
|
||||
@@ -517,57 +482,6 @@ func (p *Provider) transformVolumes(ctx context.Context, podNamespace string, vo
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncConfigmap will add the configmap object to the queue of the syncer controller to be synced to the host cluster
|
||||
func (p *Provider) syncConfigmap(ctx context.Context, podNamespace string, configMapName string, optional bool) error {
|
||||
var configMap corev1.ConfigMap
|
||||
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: podNamespace,
|
||||
Name: configMapName,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, nsName, &configMap); err != nil {
|
||||
// check if its optional configmap
|
||||
if apierrors.IsNotFound(err) && optional {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to get configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
if err := p.Handler.AddResource(ctx, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to add configmap to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncSecret will add the secret object to the queue of the syncer controller to be synced to the host cluster
|
||||
func (p *Provider) syncSecret(ctx context.Context, podNamespace string, secretName string, optional bool) error {
|
||||
p.logger.Infow("Syncing secret", "Name", secretName, "Namespace", podNamespace, "optional", optional)
|
||||
|
||||
var secret corev1.Secret
|
||||
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: podNamespace,
|
||||
Name: secretName,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, nsName, &secret); err != nil {
|
||||
if apierrors.IsNotFound(err) && optional {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to get secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
if err := p.Handler.AddResource(ctx, &secret); err != nil {
|
||||
return fmt.Errorf("unable to add secret to sync %s/%s: %w", nsName.Namespace, nsName.Name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdatePod executes updatePod with retry
|
||||
func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
return p.withRetry(ctx, p.updatePod, pod)
|
||||
@@ -611,6 +525,11 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// fieldpath annotations
|
||||
if err := p.configureFieldPathEnv(¤tVirtualPod, ¤tHostPod); err != nil {
|
||||
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
currentVirtualPod.Spec.Containers = updateContainerImages(currentVirtualPod.Spec.Containers, pod.Spec.Containers)
|
||||
currentVirtualPod.Spec.InitContainers = updateContainerImages(currentVirtualPod.Spec.InitContainers, pod.Spec.InitContainers)
|
||||
|
||||
@@ -678,81 +597,11 @@ func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
if err = p.pruneUnusedVolumes(ctx, pod); err != nil {
|
||||
// note that we don't return an error here: the pod was successfully deleted, and another process
// should clean this up without affecting the user
|
||||
p.logger.Errorf("failed to prune leftover volumes for %s/%s: %w, resources may be left", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
p.logger.Infof("Deleted pod %s", pod.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// pruneUnusedVolumes removes volumes used by the deleted pod that aren't used by any other pods
|
||||
func (p *Provider) pruneUnusedVolumes(ctx context.Context, pod *corev1.Pod) error {
|
||||
rawSecrets, rawConfigMaps := getSecretsAndConfigmaps(pod)
|
||||
// since this pod was removed, initially mark all of the secrets/configmaps it uses as eligible
// for pruning
|
||||
pruneSecrets := sets.Set[string]{}.Insert(rawSecrets...)
|
||||
pruneConfigMap := sets.Set[string]{}.Insert(rawConfigMaps...)
|
||||
|
||||
var pods corev1.PodList
|
||||
// only pods in the same namespace could be using secrets/configmaps that this pod is using
|
||||
err := p.VirtualClient.List(ctx, &pods, &client.ListOptions{
|
||||
Namespace: pod.Namespace,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods: %w", err)
|
||||
}
|
||||
|
||||
for _, vPod := range pods.Items {
|
||||
if vPod.Name == pod.Name {
|
||||
continue
|
||||
}
|
||||
|
||||
secrets, configMaps := getSecretsAndConfigmaps(&vPod)
|
||||
pruneSecrets.Delete(secrets...)
|
||||
pruneConfigMap.Delete(configMaps...)
|
||||
}
|
||||
|
||||
for _, secretName := range pruneSecrets.UnsortedList() {
|
||||
var secret corev1.Secret
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: secretName,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, key, &secret); err != nil {
|
||||
return fmt.Errorf("unable to get secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
|
||||
}
|
||||
|
||||
if err = p.Handler.RemoveResource(ctx, &secret); err != nil {
|
||||
return fmt.Errorf("unable to remove secret %s/%s for pod volume: %w", pod.Namespace, secretName, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, configMapName := range pruneConfigMap.UnsortedList() {
|
||||
var configMap corev1.ConfigMap
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: configMapName,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := p.VirtualClient.Get(ctx, key, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to get configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
|
||||
}
|
||||
|
||||
if err = p.Handler.RemoveResource(ctx, &configMap); err != nil {
|
||||
return fmt.Errorf("unable to remove configMap %s/%s for pod volume: %w", pod.Namespace, configMapName, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
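pruneUnusedVolumes is essentially a set difference: start with every secret/configmap the deleted pod referenced, then drop anything still referenced by another pod in the same namespace. A small standalone sketch of that bookkeeping with `k8s.io/apimachinery/pkg/util/sets`; the pod listing and the actual resource removal are stubbed out, and the names are made up for the example:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// pruneCandidates returns the secret names used by the deleted pod that no
// surviving pod still references.
func pruneCandidates(deletedPodSecrets []string, otherPodsSecrets [][]string) []string {
	prune := sets.New[string](deletedPodSecrets...)
	for _, used := range otherPodsSecrets {
		prune.Delete(used...)
	}

	return prune.UnsortedList()
}

func main() {
	deleted := []string{"db-creds", "tls-cert"}
	others := [][]string{{"tls-cert"}, {"registry-pull"}}
	fmt.Println(pruneCandidates(deleted, others)) // [db-creds]
}
```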
|
||||
|
||||
// GetPod retrieves a pod by name from the provider (can be cached).
|
||||
// The Pod returned is expected to be immutable, and may be accessed
|
||||
// concurrently outside of the calling goroutine. Therefore it is recommended
|
||||
@@ -869,22 +718,22 @@ func configureNetworking(pod *corev1.Pod, podName, podNamespace, serverIP, dnsIP
|
||||
|
||||
// inject networking information to the pod's environment variables
|
||||
for i := range pod.Spec.Containers {
|
||||
pod.Spec.Containers[i].Env = overrideEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
|
||||
pod.Spec.Containers[i].Env = mergeEnvVars(pod.Spec.Containers[i].Env, updatedEnvVars)
|
||||
}
|
||||
|
||||
// handle init containers as well
|
||||
for i := range pod.Spec.InitContainers {
|
||||
pod.Spec.InitContainers[i].Env = overrideEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
|
||||
pod.Spec.InitContainers[i].Env = mergeEnvVars(pod.Spec.InitContainers[i].Env, updatedEnvVars)
|
||||
}
|
||||
|
||||
// handle ephemeral containers as well
|
||||
for i := range pod.Spec.EphemeralContainers {
|
||||
pod.Spec.EphemeralContainers[i].Env = overrideEnvVars(pod.Spec.EphemeralContainers[i].Env, updatedEnvVars)
|
||||
pod.Spec.EphemeralContainers[i].Env = mergeEnvVars(pod.Spec.EphemeralContainers[i].Env, updatedEnvVars)
|
||||
}
|
||||
}
|
||||
|
||||
// overrideEnvVars will override the orig environment variables if found in the updated list
|
||||
func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
|
||||
// mergeEnvVars overrides the orig environment variables found in the updated list and appends the ones that are not found
|
||||
func mergeEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
|
||||
if len(updated) == 0 {
|
||||
return orig
|
||||
}
|
||||
@@ -895,42 +744,22 @@ func overrideEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
|
||||
updatedEnvVarMap[updatedEnvVar.Name] = updatedEnvVar
|
||||
}
|
||||
|
||||
for i, origEnvVar := range orig {
|
||||
if updatedEnvVar, found := updatedEnvVarMap[origEnvVar.Name]; found {
|
||||
orig[i] = updatedEnvVar
|
||||
for i, env := range orig {
|
||||
if updatedEnv, ok := updatedEnvVarMap[env.Name]; ok {
|
||||
orig[i] = updatedEnv
|
||||
// Remove the updated variable from the map
|
||||
delete(updatedEnvVarMap, env.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Any variables remaining in the map are new and should be appended to the original slice.
|
||||
for _, env := range updatedEnvVarMap {
|
||||
orig = append(orig, env)
|
||||
}
|
||||
|
||||
return orig
|
||||
}
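The rename from overrideEnvVars to mergeEnvVars reflects a behavior change: entries present in both slices are overridden in place, while entries only in the updated slice are now appended instead of dropped (the appended entries come out of a map, so their relative order is not guaranteed). A minimal, runnable reimplementation of that merge outside the provider package, purely for illustration:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// mergeEnv mirrors the merge semantics above: override matches in place, append the rest.
func mergeEnv(orig, updated []corev1.EnvVar) []corev1.EnvVar {
	byName := make(map[string]corev1.EnvVar, len(updated))
	for _, e := range updated {
		byName[e.Name] = e
	}

	for i, e := range orig {
		if u, ok := byName[e.Name]; ok {
			orig[i] = u
			delete(byName, e.Name)
		}
	}

	// leftovers are new variables; map iteration order is unspecified
	for _, e := range byName {
		orig = append(orig, e)
	}

	return orig
}

func main() {
	orig := []corev1.EnvVar{{Name: "FOO_0", Value: "old_0"}, {Name: "FOO_1", Value: "old_1"}}
	updated := []corev1.EnvVar{{Name: "FOO_1", Value: "new_1"}, {Name: "FOO_2", Value: "new_2"}}
	fmt.Println(mergeEnv(orig, updated)) // FOO_0 kept, FOO_1 overridden, FOO_2 appended
}
```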
|
||||
|
||||
// getSecretsAndConfigmaps retrieves a list of all secrets/configmaps that are in use by a given pod. Useful
// for determining which virtual cluster resources need to exist in the host cluster, and which can be removed.
|
||||
func getSecretsAndConfigmaps(pod *corev1.Pod) ([]string, []string) {
|
||||
var (
|
||||
secrets []string
|
||||
configMaps []string
|
||||
)
|
||||
|
||||
for _, volume := range pod.Spec.Volumes {
|
||||
if volume.Secret != nil {
|
||||
secrets = append(secrets, volume.Secret.SecretName)
|
||||
} else if volume.ConfigMap != nil {
|
||||
configMaps = append(configMaps, volume.ConfigMap.Name)
|
||||
} else if volume.Projected != nil {
|
||||
for _, source := range volume.Projected.Sources {
|
||||
if source.ConfigMap != nil {
|
||||
configMaps = append(configMaps, source.ConfigMap.Name)
|
||||
} else if source.Secret != nil {
|
||||
secrets = append(secrets, source.Secret.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return secrets, configMaps
|
||||
}
|
||||
|
||||
// configureFieldPathEnv will retrieve all annotations created by the pod mutator webhook
// to assign env fieldpaths to pods; it will also make sure to change the metadata.name and
// metadata.namespace to the assigned annotations
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func Test_overrideEnvVars(t *testing.T) {
|
||||
func Test_mergeEnvVars(t *testing.T) {
|
||||
type args struct {
|
||||
orig []corev1.EnvVar
|
||||
new []corev1.EnvVar
|
||||
@@ -32,7 +32,7 @@ func Test_overrideEnvVars(t *testing.T) {
|
||||
orig: []corev1.EnvVar{},
|
||||
new: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
|
||||
},
|
||||
want: []corev1.EnvVar{},
|
||||
want: []corev1.EnvVar{{Name: "FOO", Value: "new_val"}},
|
||||
},
|
||||
{
|
||||
name: "orig has a matching element",
|
||||
@@ -56,14 +56,14 @@ func Test_overrideEnvVars(t *testing.T) {
|
||||
orig: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}},
|
||||
new: []corev1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
|
||||
},
|
||||
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}},
|
||||
want: []corev1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := overrideEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("overrideEnvVars() = %v, want %v", got, tt.want)
|
||||
if got := mergeEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("mergeEnvVars() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
54
main.go
@@ -5,6 +5,9 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/go-logr/zapr"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -28,18 +31,14 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
scheme = runtime.NewScheme()
|
||||
clusterCIDR string
|
||||
sharedAgentImage string
|
||||
sharedAgentImagePullPolicy string
|
||||
kubeconfig string
|
||||
k3SImage string
|
||||
k3SImagePullPolicy string
|
||||
kubeletPortRange string
|
||||
webhookPortRange string
|
||||
maxConcurrentReconciles int
|
||||
debug bool
|
||||
logger *log.Logger
|
||||
scheme = runtime.NewScheme()
|
||||
config cluster.Config
|
||||
kubeconfig string
|
||||
kubeletPortRange string
|
||||
webhookPortRange string
|
||||
maxConcurrentReconciles int
|
||||
debug bool
|
||||
logger *log.Logger
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -64,13 +63,17 @@ func main() {
|
||||
|
||||
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Debug level logging")
|
||||
rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig path")
|
||||
rootCmd.PersistentFlags().StringVar(&clusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
|
||||
rootCmd.PersistentFlags().StringVar(&sharedAgentImage, "shared-agent-image", "", "K3K Virtual Kubelet image")
|
||||
rootCmd.PersistentFlags().StringVar(&sharedAgentImagePullPolicy, "shared-agent-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
|
||||
rootCmd.PersistentFlags().StringVar(&config.ClusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
|
||||
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImage, "shared-agent-image", "rancher/k3k-kubelet", "K3K Virtual Kubelet image")
|
||||
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImagePullPolicy, "shared-agent-image-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
|
||||
rootCmd.PersistentFlags().StringVar(&kubeletPortRange, "kubelet-port-range", "50000-51000", "Port Range for k3k kubelet in shared mode")
|
||||
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImage, "virtual-agent-image", "rancher/k3s", "K3S Virtual Agent image")
|
||||
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImagePullPolicy, "virtual-agent-image-pull-policy", "", "K3S Virtual Agent image pull policy must be one of Always, IfNotPresent or Never")
|
||||
rootCmd.PersistentFlags().StringVar(&webhookPortRange, "webhook-port-range", "51001-52000", "Port Range for k3k kubelet webhook in shared mode")
|
||||
rootCmd.PersistentFlags().StringVar(&k3SImage, "k3s-image", "rancher/k3k", "K3K server image")
|
||||
rootCmd.PersistentFlags().StringVar(&k3SImagePullPolicy, "k3s-image-pull-policy", "", "K3K server image pull policy")
|
||||
rootCmd.PersistentFlags().StringVar(&config.K3SServerImage, "k3s-server-image", "rancher/k3s", "K3K server image")
|
||||
rootCmd.PersistentFlags().StringVar(&config.K3SServerImagePullPolicy, "k3s-server-image-pull-policy", "", "K3K server image pull policy")
|
||||
rootCmd.PersistentFlags().StringSliceVar(&config.ServerImagePullSecrets, "server-image-pull-secret", nil, "Image pull secret used for servers")
|
||||
rootCmd.PersistentFlags().StringSliceVar(&config.AgentImagePullSecrets, "agent-image-pull-secret", nil, "Image pull secret used for agents")
|
||||
rootCmd.PersistentFlags().IntVar(&maxConcurrentReconciles, "max-concurrent-reconciles", 50, "maximum number of concurrent reconciles")
|
||||
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
@@ -79,7 +82,8 @@ func main() {
|
||||
}
|
||||
|
||||
func run(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
||||
defer stop()
|
||||
|
||||
logger.Info("Starting k3k - Version: " + buildinfo.Version)
|
||||
|
||||
@@ -109,7 +113,7 @@ func run(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage, k3SImagePullPolicy, maxConcurrentReconciles, portAllocator, nil); err != nil {
|
||||
if err := cluster.Add(ctx, mgr, &config, maxConcurrentReconciles, portAllocator, nil); err != nil {
|
||||
return fmt.Errorf("failed to add the new cluster controller: %v", err)
|
||||
}
|
||||
|
||||
@@ -121,7 +125,7 @@ func run(cmd *cobra.Command, args []string) error {
|
||||
|
||||
logger.Info("adding clusterpolicy controller")
|
||||
|
||||
if err := policy.Add(mgr, clusterCIDR, maxConcurrentReconciles); err != nil {
|
||||
if err := policy.Add(mgr, config.ClusterCIDR, maxConcurrentReconciles); err != nil {
|
||||
return fmt.Errorf("failed to add the clusterpolicy controller: %v", err)
|
||||
}
|
||||
|
||||
@@ -129,14 +133,16 @@ func run(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("failed to start the manager: %v", err)
|
||||
}
|
||||
|
||||
logger.Info("controller manager stopped")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validate() error {
|
||||
if sharedAgentImagePullPolicy != "" {
|
||||
if sharedAgentImagePullPolicy != string(v1.PullAlways) &&
|
||||
sharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
|
||||
sharedAgentImagePullPolicy != string(v1.PullNever) {
|
||||
if config.SharedAgentImagePullPolicy != "" {
|
||||
if config.SharedAgentImagePullPolicy != string(v1.PullAlways) &&
|
||||
config.SharedAgentImagePullPolicy != string(v1.PullIfNotPresent) &&
|
||||
config.SharedAgentImagePullPolicy != string(v1.PullNever) {
|
||||
return errors.New("invalid value for shared agent image policy")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -103,6 +103,7 @@ type ClusterSpec struct {
|
||||
// Expose specifies options for exposing the API server.
|
||||
// By default, it's only exposed as a ClusterIP.
|
||||
//
|
||||
// +kubebuilder:validation:XValidation:rule="[has(self.ingress), has(self.loadbalancer), has(self.nodePort)].filter(x, x).size() <= 1",message="ingress, loadbalancer and nodePort are mutually exclusive; only one can be set"
|
||||
// +optional
|
||||
Expose *ExposeConfig `json:"expose,omitempty"`
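The XValidation marker above enforces that at most one of `ingress`, `loadbalancer` and `nodePort` is set on `expose`. A hedged sketch of a spec that should be rejected under this rule (it mirrors the e2e test added later in this diff); types come from the k3k.io/v1alpha1 package used throughout:

```go
// Sketch only: a ClusterSpec that sets two expose methods at once, so the
// API server should reject it at admission time via the CEL rule above.
var invalidExpose = v1alpha1.ClusterSpec{
	Expose: &v1alpha1.ExposeConfig{
		LoadBalancer: &v1alpha1.LoadBalancerConfig{},
		NodePort:     &v1alpha1.NodePortConfig{},
	},
}
```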
|
||||
|
||||
@@ -176,6 +177,118 @@ type ClusterSpec struct {
|
||||
//
|
||||
// +optional
|
||||
CustomCAs CustomCAs `json:"customCAs,omitempty"`
|
||||
|
||||
// Sync specifies the resource types that will be synced from virtual cluster to host cluster.
|
||||
//
|
||||
// +kubebuilder:default={}
|
||||
// +optional
|
||||
Sync *SyncConfig `json:"sync,omitempty"`
|
||||
}
|
||||
|
||||
// SyncConfig contains the resources that should be synced from virtual cluster to host cluster.
|
||||
type SyncConfig struct {
|
||||
// Services resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": true}
|
||||
Services ServiceSyncConfig `json:"services,omitempty"`
|
||||
// ConfigMaps resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": true}
|
||||
ConfigMaps ConfigMapSyncConfig `json:"configmaps,omitempty"`
|
||||
// Secrets resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": true}
|
||||
Secrets SecretSyncConfig `json:"secrets,omitempty"`
|
||||
// Ingresses resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": false}
|
||||
Ingresses IngressSyncConfig `json:"ingresses,omitempty"`
|
||||
// PersistentVolumeClaims resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": true}
|
||||
PersistentVolumeClaims PersistentVolumeClaimSyncConfig `json:"persistentVolumeClaims,omitempty"`
|
||||
// PriorityClasses resources sync configuration.
|
||||
//
|
||||
// +kubebuilder:default={"enabled": false}
|
||||
PriorityClasses PriorityClassSyncConfig `json:"priorityClasses,omitempty"`
|
||||
}
|
||||
|
||||
// SecretSyncConfig specifies the sync options for secrets.
|
||||
type SecretSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
// +optional
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty
|
||||
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceSyncConfig specifies the sync options for services.
|
||||
type ServiceSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
// +optional
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty
|
||||
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// ConfigMapSyncConfig specifies the sync options for config maps.
|
||||
type ConfigMapSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
// +optional
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty
|
||||
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// IngressSyncConfig specifies the sync options for ingresses.
|
||||
type IngressSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
// +optional
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty
|
||||
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// PersistentVolumeClaimSyncConfig specifies the sync options for persistent volume claims.
|
||||
type PersistentVolumeClaimSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
// +optional
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty
|
||||
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
|
||||
|
||||
// PriorityClassSyncConfig specifies the sync options for priority classes.
|
||||
type PriorityClassSyncConfig struct {
|
||||
// Enabled is an on/off switch for syncing resources.
|
||||
// +optional
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// Selector specifies the set of labels of the resources that will be synced; if empty
|
||||
// then all resources of the given type will be synced.
|
||||
//
|
||||
// +optional
|
||||
Selector map[string]string `json:"selector,omitempty"`
|
||||
}
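Each of the sync types above is just an on/off switch plus an optional label selector. A hedged sketch of a fully spelled-out SyncConfig follows; note that the kubebuilder defaults above only apply when an object passes through the API server, so a Go literal sets every field explicitly, and the selector label here is a placeholder:

```go
// Sync only Secrets labeled sync=true, keep Services/ConfigMaps/PVCs on, and
// turn Ingress syncing on (it defaults to off); PriorityClasses stay disabled.
var exampleSync = v1alpha1.SyncConfig{
	Services:   v1alpha1.ServiceSyncConfig{Enabled: true},
	ConfigMaps: v1alpha1.ConfigMapSyncConfig{Enabled: true},
	Secrets: v1alpha1.SecretSyncConfig{
		Enabled:  true,
		Selector: map[string]string{"sync": "true"},
	},
	Ingresses:              v1alpha1.IngressSyncConfig{Enabled: true},
	PersistentVolumeClaims: v1alpha1.PersistentVolumeClaimSyncConfig{Enabled: true},
	// PriorityClasses is left at its zero value (Enabled: false).
}
```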
|
||||
|
||||
// ClusterMode is the possible provisioning mode of a Cluster.
|
||||
@@ -484,6 +597,12 @@ type VirtualClusterPolicySpec struct {
|
||||
//
|
||||
// +optional
|
||||
PodSecurityAdmissionLevel *PodSecurityAdmissionLevel `json:"podSecurityAdmissionLevel,omitempty"`
|
||||
|
||||
// Sync specifies the resource types that will be synced from virtual cluster to host cluster.
|
||||
//
|
||||
// +kubebuilder:default={}
|
||||
// +optional
|
||||
Sync *SyncConfig `json:"sync,omitempty"`
|
||||
}
|
||||
|
||||
// PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
|
||||
|
||||
@@ -164,6 +164,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
|
||||
}
|
||||
}
|
||||
out.CustomCAs = in.CustomCAs
|
||||
if in.Sync != nil {
|
||||
in, out := &in.Sync, &out.Sync
|
||||
*out = new(SyncConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
|
||||
@@ -203,6 +208,28 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ConfigMapSyncConfig) DeepCopyInto(out *ConfigMapSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapSyncConfig.
|
||||
func (in *ConfigMapSyncConfig) DeepCopy() *ConfigMapSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ConfigMapSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CredentialSource) DeepCopyInto(out *CredentialSource) {
|
||||
*out = *in
|
||||
@@ -307,6 +334,28 @@ func (in *IngressConfig) DeepCopy() *IngressConfig {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressSyncConfig) DeepCopyInto(out *IngressSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSyncConfig.
|
||||
func (in *IngressSyncConfig) DeepCopy() *IngressSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IngressSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LoadBalancerConfig) DeepCopyInto(out *LoadBalancerConfig) {
|
||||
*out = *in
|
||||
@@ -377,6 +426,115 @@ func (in *PersistenceConfig) DeepCopy() *PersistenceConfig {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PersistentVolumeClaimSyncConfig) DeepCopyInto(out *PersistentVolumeClaimSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSyncConfig.
|
||||
func (in *PersistentVolumeClaimSyncConfig) DeepCopy() *PersistentVolumeClaimSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PersistentVolumeClaimSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PriorityClassSyncConfig) DeepCopyInto(out *PriorityClassSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassSyncConfig.
|
||||
func (in *PriorityClassSyncConfig) DeepCopy() *PriorityClassSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PriorityClassSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SecretSyncConfig) DeepCopyInto(out *SecretSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSyncConfig.
|
||||
func (in *SecretSyncConfig) DeepCopy() *SecretSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SecretSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceSyncConfig) DeepCopyInto(out *ServiceSyncConfig) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSyncConfig.
|
||||
func (in *ServiceSyncConfig) DeepCopy() *ServiceSyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceSyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SyncConfig) DeepCopyInto(out *SyncConfig) {
|
||||
*out = *in
|
||||
in.Services.DeepCopyInto(&out.Services)
|
||||
in.ConfigMaps.DeepCopyInto(&out.ConfigMaps)
|
||||
in.Secrets.DeepCopyInto(&out.Secrets)
|
||||
in.Ingresses.DeepCopyInto(&out.Ingresses)
|
||||
in.PersistentVolumeClaims.DeepCopyInto(&out.PersistentVolumeClaims)
|
||||
in.PriorityClasses.DeepCopyInto(&out.PriorityClasses)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncConfig.
|
||||
func (in *SyncConfig) DeepCopy() *SyncConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SyncConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VirtualClusterPolicy) DeepCopyInto(out *VirtualClusterPolicy) {
|
||||
*out = *in
|
||||
@@ -461,6 +619,11 @@ func (in *VirtualClusterPolicySpec) DeepCopyInto(out *VirtualClusterPolicySpec)
|
||||
*out = new(PodSecurityAdmissionLevel)
|
||||
**out = **in
|
||||
}
|
||||
if in.Sync != nil {
|
||||
in, out := &in.Sync, &out.Sync
|
||||
*out = new(SyncConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterPolicySpec.
|
||||
|
||||
@@ -31,23 +31,26 @@ const (
|
||||
|
||||
type SharedAgent struct {
|
||||
*Config
|
||||
serviceIP string
|
||||
image string
|
||||
imagePullPolicy string
|
||||
token string
|
||||
kubeletPort int
|
||||
webhookPort int
|
||||
serviceIP string
|
||||
image string
|
||||
imagePullPolicy string
|
||||
imageRegistry string
|
||||
token string
|
||||
kubeletPort int
|
||||
webhookPort int
|
||||
imagePullSecrets []string
|
||||
}
|
||||
|
||||
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort, webhookPort int) *SharedAgent {
|
||||
func NewSharedAgent(config *Config, serviceIP, image, imagePullPolicy, token string, kubeletPort, webhookPort int, imagePullSecrets []string) *SharedAgent {
|
||||
return &SharedAgent{
|
||||
Config: config,
|
||||
serviceIP: serviceIP,
|
||||
image: image,
|
||||
imagePullPolicy: imagePullPolicy,
|
||||
token: token,
|
||||
kubeletPort: kubeletPort,
|
||||
webhookPort: webhookPort,
|
||||
Config: config,
|
||||
serviceIP: serviceIP,
|
||||
image: image,
|
||||
imagePullPolicy: imagePullPolicy,
|
||||
token: token,
|
||||
kubeletPort: kubeletPort,
|
||||
webhookPort: webhookPort,
|
||||
imagePullSecrets: imagePullSecrets,
|
||||
}
|
||||
}
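NewSharedAgent now also receives the image pull secrets that end up on the kubelet pod spec. A hedged call sketch follows; the config, service IP, token and secret name are placeholders, 10250 matches the default kubelet port used further down in this diff, and 51001 is simply the start of the default webhook port range from the flag shown earlier:

```go
// Hypothetical call site for the new constructor signature.
agent := NewSharedAgent(cfg, serviceIP,
	"rancher/k3k-kubelet:dev", string(v1.PullIfNotPresent),
	token, 10250, 51001, []string{"my-registry-secret"})
_ = agent
```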
|
||||
|
||||
@@ -156,7 +159,13 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
dnsPolicy = v1.DNSClusterFirstWithHostNet
|
||||
}
|
||||
|
||||
return v1.PodSpec{
|
||||
image := s.image
|
||||
|
||||
if s.imageRegistry != "" {
|
||||
image = s.imageRegistry + "/" + s.image
|
||||
}
|
||||
|
||||
podSpec := v1.PodSpec{
|
||||
HostNetwork: hostNetwork,
|
||||
DNSPolicy: dnsPolicy,
|
||||
ServiceAccountName: s.Name(),
|
||||
@@ -202,7 +211,7 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: s.Name(),
|
||||
Image: s.image,
|
||||
Image: image,
|
||||
ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{},
|
||||
@@ -254,6 +263,11 @@ func (s *SharedAgent) podSpec() v1.PodSpec {
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, imagePullSecret := range s.imagePullSecrets {
|
||||
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
|
||||
}
|
||||
|
||||
return podSpec
|
||||
}
|
||||
|
||||
func (s *SharedAgent) service(ctx context.Context) error {
|
||||
@@ -367,6 +381,11 @@ func (s *SharedAgent) role(ctx context.Context) error {
|
||||
Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/attach", "pods/exec", "pods/ephemeralcontainers", "secrets", "configmaps", "services"},
|
||||
Verbs: []string{"*"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{"networking.k8s.io"},
|
||||
Resources: []string{"ingresses"},
|
||||
Verbs: []string{"*"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{"k3k.io"},
|
||||
Resources: []string{"clusters"},
|
||||
|
||||
@@ -22,19 +22,22 @@ const (
|
||||
|
||||
type VirtualAgent struct {
|
||||
*Config
|
||||
serviceIP string
|
||||
token string
|
||||
k3SImage string
|
||||
k3SImagePullPolicy string
|
||||
serviceIP string
|
||||
token string
|
||||
Image string
|
||||
ImagePullPolicy string
|
||||
ImageRegistry string
|
||||
imagePullSecrets []string
|
||||
}
|
||||
|
||||
func NewVirtualAgent(config *Config, serviceIP, token string, k3SImage string, k3SImagePullPolicy string) *VirtualAgent {
|
||||
func NewVirtualAgent(config *Config, serviceIP, token, Image, ImagePullPolicy string, imagePullSecrets []string) *VirtualAgent {
|
||||
return &VirtualAgent{
|
||||
Config: config,
|
||||
serviceIP: serviceIP,
|
||||
token: token,
|
||||
k3SImage: k3SImage,
|
||||
k3SImagePullPolicy: k3SImagePullPolicy,
|
||||
Config: config,
|
||||
serviceIP: serviceIP,
|
||||
token: token,
|
||||
Image: Image,
|
||||
ImagePullPolicy: ImagePullPolicy,
|
||||
imagePullSecrets: imagePullSecrets,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,7 +87,7 @@ with-node-id: true`, serviceIP, token)
|
||||
}
|
||||
|
||||
func (v *VirtualAgent) deployment(ctx context.Context) error {
|
||||
image := controller.K3SImage(v.cluster, v.k3SImage)
|
||||
image := controller.K3SImage(v.cluster, v.Image)
|
||||
|
||||
const name = "k3k-agent"
|
||||
|
||||
@@ -183,7 +186,7 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
|
||||
{
|
||||
Name: name,
|
||||
Image: image,
|
||||
ImagePullPolicy: v1.PullPolicy(v.k3SImagePullPolicy),
|
||||
ImagePullPolicy: v1.PullPolicy(v.ImagePullPolicy),
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: ptr.To(true),
|
||||
},
|
||||
@@ -243,5 +246,9 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
|
||||
}
|
||||
}
|
||||
|
||||
for _, imagePullSecret := range v.imagePullSecrets {
|
||||
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
|
||||
}
|
||||
|
||||
return podSpec
|
||||
}
|
||||
|
||||
@@ -61,26 +61,36 @@ var (
|
||||
ErrCustomCACertSecretMissing = errors.New("custom CA certificate secret is missing")
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
ClusterCIDR string
|
||||
SharedAgentImage string
|
||||
SharedAgentImagePullPolicy string
|
||||
VirtualAgentImage string
|
||||
VirtualAgentImagePullPolicy string
|
||||
K3SServerImage string
|
||||
K3SServerImagePullPolicy string
|
||||
ServerImagePullSecrets []string
|
||||
AgentImagePullSecrets []string
|
||||
}
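Config gathers what used to be a long list of positional arguments to Add. A hedged sketch of a populated Config; all values are placeholders, and in the controller binary they come from the persistent flags bound to `&config` in the main.go hunk earlier in this diff:

```go
// Placeholder values only; pull-policy fields left empty fall back to the
// controller's defaults.
var exampleConfig = Config{
	ClusterCIDR:                "10.44.0.0/16",
	SharedAgentImage:           "rancher/k3k-kubelet",
	SharedAgentImagePullPolicy: "IfNotPresent",
	VirtualAgentImage:          "rancher/k3s",
	K3SServerImage:             "rancher/k3s",
	ServerImagePullSecrets:     []string{"server-pull-secret"},
	AgentImagePullSecrets:      []string{"agent-pull-secret"},
}
```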
|
||||
|
||||
type ClusterReconciler struct {
|
||||
DiscoveryClient *discovery.DiscoveryClient
|
||||
Client client.Client
|
||||
Scheme *runtime.Scheme
|
||||
PortAllocator *agent.PortAllocator
|
||||
|
||||
record.EventRecorder
|
||||
SharedAgentImage string
|
||||
SharedAgentImagePullPolicy string
|
||||
K3SImage string
|
||||
K3SImagePullPolicy string
|
||||
PortAllocator *agent.PortAllocator
|
||||
Config
|
||||
}
|
||||
|
||||
// Add adds a new controller to the manager
|
||||
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage string, k3SImagePullPolicy string, maxConcurrentReconciles int, portAllocator *agent.PortAllocator, eventRecorder record.EventRecorder) error {
|
||||
func Add(ctx context.Context, mgr manager.Manager, config *Config, maxConcurrentReconciles int, portAllocator *agent.PortAllocator, eventRecorder record.EventRecorder) error {
|
||||
discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sharedAgentImage == "" {
|
||||
if config.SharedAgentImage == "" {
|
||||
return errors.New("missing shared agent image")
|
||||
}
|
||||
|
||||
@@ -90,15 +100,21 @@ func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgent
|
||||
|
||||
// initialize a new Reconciler
|
||||
reconciler := ClusterReconciler{
|
||||
DiscoveryClient: discoveryClient,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
EventRecorder: eventRecorder,
|
||||
SharedAgentImage: sharedAgentImage,
|
||||
SharedAgentImagePullPolicy: sharedAgentImagePullPolicy,
|
||||
K3SImage: k3SImage,
|
||||
K3SImagePullPolicy: k3SImagePullPolicy,
|
||||
PortAllocator: portAllocator,
|
||||
DiscoveryClient: discoveryClient,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
EventRecorder: eventRecorder,
|
||||
PortAllocator: portAllocator,
|
||||
Config: Config{
|
||||
SharedAgentImage: config.SharedAgentImage,
|
||||
SharedAgentImagePullPolicy: config.SharedAgentImagePullPolicy,
|
||||
VirtualAgentImage: config.VirtualAgentImage,
|
||||
VirtualAgentImagePullPolicy: config.VirtualAgentImagePullPolicy,
|
||||
K3SServerImage: config.K3SServerImage,
|
||||
K3SServerImagePullPolicy: config.K3SServerImagePullPolicy,
|
||||
ServerImagePullSecrets: config.ServerImagePullSecrets,
|
||||
AgentImagePullSecrets: config.AgentImagePullSecrets,
|
||||
},
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
@@ -266,7 +282,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
|
||||
return err
|
||||
}
|
||||
|
||||
s := server.New(cluster, c.Client, token, c.K3SImage, c.K3SImagePullPolicy)
|
||||
s := server.New(cluster, c.Client, token, c.K3SServerImage, c.K3SServerImagePullPolicy, c.ServerImagePullSecrets)
|
||||
|
||||
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
|
||||
if cluster.Status.ClusterCIDR == "" {
|
||||
@@ -673,7 +689,7 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.C
|
||||
|
||||
var agentEnsurer agent.ResourceEnsurer
|
||||
if cluster.Spec.Mode == agent.VirtualNodeMode {
|
||||
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.K3SImage, c.K3SImagePullPolicy)
|
||||
agentEnsurer = agent.NewVirtualAgent(config, serviceIP, token, c.VirtualAgentImage, c.VirtualAgentImagePullPolicy, c.AgentImagePullSecrets)
|
||||
} else {
|
||||
// Assign port from pool if shared agent enabled mirroring of host nodes
|
||||
kubeletPort := 10250
|
||||
@@ -697,7 +713,7 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.C
|
||||
cluster.Status.WebhookPort = webhookPort
|
||||
}
|
||||
|
||||
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, webhookPort)
|
||||
agentEnsurer = agent.NewSharedAgent(config, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token, kubeletPort, webhookPort, c.AgentImagePullSecrets)
|
||||
}
|
||||
|
||||
return agentEnsurer.EnsureResources(ctx)
|
||||
@@ -718,6 +734,11 @@ func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster, policy v1alpha1.
|
||||
}
|
||||
}
|
||||
|
||||
// validate sync policy
|
||||
if !equality.Semantic.DeepEqual(cluster.Spec.Sync, policy.Spec.Sync) {
|
||||
return fmt.Errorf("sync configuration %v is not allowed by the policy %q", cluster.Spec.Sync, policy.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
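The policy check above relies on apimachinery's semantic deep equality, so a Cluster governed by a policy must declare exactly the same Sync block, including whether it is set at all: a nil Sync and an explicit empty SyncConfig do not compare equal. A hedged sketch of the same comparison as a standalone helper (not part of the controller):

```go
// syncMatchesPolicy reports whether a cluster's sync settings match the
// policy's; hypothetical helper mirroring the inline check above.
func syncMatchesPolicy(cluster *v1alpha1.Cluster, policy v1alpha1.VirtualClusterPolicy) bool {
	// equality.Semantic falls back to reflect-style comparison here, so a
	// nil *SyncConfig and &SyncConfig{} are treated as different values.
	return equality.Semantic.DeepEqual(cluster.Spec.Sync, policy.Spec.Sync)
}
```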
|
||||
|
||||
|
||||
@@ -71,7 +71,12 @@ var _ = BeforeSuite(func() {
|
||||
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
|
||||
err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "", "rancher/k3s", "", 50, portAllocator, &record.FakeRecorder{})
|
||||
clusterConfig := &cluster.Config{
|
||||
SharedAgentImage: "rancher/k3k-kubelet:latest",
|
||||
K3SServerImage: "rancher/k3s",
|
||||
VirtualAgentImage: "rancher/k3s",
|
||||
}
|
||||
err = cluster.Add(ctx, mgr, clusterConfig, 50, portAllocator, &record.FakeRecorder{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
go func() {
|
||||
|
||||
@@ -263,6 +263,26 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
|
||||
Expect(etcdPort.TargetPort.IntValue()).To(BeEquivalentTo(2379))
|
||||
})
|
||||
})
|
||||
|
||||
When("exposing the cluster with nodePort and loadbalancer", func() {
|
||||
It("will fail", func() {
|
||||
cluster := &v1alpha1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "cluster-",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Expose: &v1alpha1.ExposeConfig{
|
||||
LoadBalancer: &v1alpha1.LoadBalancerConfig{},
|
||||
NodePort: &v1alpha1.NodePortConfig{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, cluster)
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -32,22 +32,24 @@ const (
|
||||
|
||||
// Server
|
||||
type Server struct {
|
||||
cluster *v1alpha1.Cluster
|
||||
client client.Client
|
||||
mode string
|
||||
token string
|
||||
k3SImage string
|
||||
k3SImagePullPolicy string
|
||||
cluster *v1alpha1.Cluster
|
||||
client client.Client
|
||||
mode string
|
||||
token string
|
||||
image string
|
||||
imagePullPolicy string
|
||||
imagePullSecrets []string
|
||||
}
|
||||
|
||||
func New(cluster *v1alpha1.Cluster, client client.Client, token string, k3SImage string, k3SImagePullPolicy string) *Server {
|
||||
func New(cluster *v1alpha1.Cluster, client client.Client, token, image, imagePullPolicy string, imagePullSecrets []string) *Server {
|
||||
return &Server{
|
||||
cluster: cluster,
|
||||
client: client,
|
||||
token: token,
|
||||
mode: string(cluster.Spec.Mode),
|
||||
k3SImage: k3SImage,
|
||||
k3SImagePullPolicy: k3SImagePullPolicy,
|
||||
cluster: cluster,
|
||||
client: client,
|
||||
token: token,
|
||||
mode: string(cluster.Spec.Mode),
|
||||
image: image,
|
||||
imagePullPolicy: imagePullPolicy,
|
||||
imagePullSecrets: imagePullSecrets,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -119,7 +121,7 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
|
||||
{
|
||||
Name: name,
|
||||
Image: image,
|
||||
ImagePullPolicy: v1.PullPolicy(s.k3SImagePullPolicy),
|
||||
ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
@@ -242,6 +244,11 @@ func (s *Server) podSpec(image, name string, persistent bool, startupCmd string)
|
||||
|
||||
podSpec.Containers[0].Env = append(podSpec.Containers[0].Env, s.cluster.Spec.ServerEnvs...)
|
||||
|
||||
// add image pull secrets
|
||||
for _, imagePullSecret := range s.imagePullSecrets {
|
||||
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, v1.LocalObjectReference{Name: imagePullSecret})
|
||||
}
|
||||
|
||||
return podSpec
|
||||
}
|
||||
|
||||
@@ -253,7 +260,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
|
||||
persistent bool
|
||||
)
|
||||
|
||||
image := controller.K3SImage(s.cluster, s.k3SImage)
|
||||
image := controller.K3SImage(s.cluster, s.image)
|
||||
name := controller.SafeConcatNameWithPrefix(s.cluster.Name, serverName)
|
||||
|
||||
replicas = *s.cluster.Spec.Servers
|
||||
|
||||
@@ -25,19 +25,21 @@ var Backoff = wait.Backoff{
|
||||
Jitter: 0.1,
|
||||
}
|
||||
|
||||
// K3SImage returns the rancher/k3s image tagged with the specified Version.
|
||||
// K3SImage returns the rancher/k3s image tagged with the specified Version.
|
||||
// If Version is empty it will use the same k8s version as the host cluster,
|
||||
// stored in the Status object. It will return the untagged version as last fallback.
|
||||
// stored in the Status object. It will return the latest version as last fallback.
|
||||
func K3SImage(cluster *v1alpha1.Cluster, k3SImage string) string {
|
||||
image := k3SImage
|
||||
|
||||
imageVersion := "latest"
|
||||
|
||||
if cluster.Spec.Version != "" {
|
||||
return k3SImage + ":" + cluster.Spec.Version
|
||||
imageVersion = cluster.Spec.Version
|
||||
} else if cluster.Status.HostVersion != "" {
|
||||
imageVersion = cluster.Status.HostVersion
|
||||
}
|
||||
|
||||
if cluster.Status.HostVersion != "" {
|
||||
return k3SImage + ":" + cluster.Status.HostVersion
|
||||
}
|
||||
|
||||
return k3SImage
|
||||
return image + ":" + imageVersion
|
||||
}
|
||||
|
||||
// SafeConcatNameWithPrefix runs the SafeConcatName with extra prefix.
|
||||
|
||||
pkg/controller/controller_test.go (new file)
@@ -0,0 +1,77 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
)
|
||||
|
||||
func Test_K3S_Image(t *testing.T) {
|
||||
type args struct {
|
||||
cluster *v1alpha1.Cluster
|
||||
k3sImage string
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
expectedData string
|
||||
}{
|
||||
{
|
||||
name: "cluster with assigned version spec",
|
||||
args: args{
|
||||
k3sImage: "rancher/k3s",
|
||||
cluster: &v1alpha1.Cluster{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "mycluster",
|
||||
Namespace: "ns-1",
|
||||
},
|
||||
Spec: v1alpha1.ClusterSpec{
|
||||
Version: "v1.2.3",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedData: "rancher/k3s:v1.2.3",
|
||||
},
|
||||
{
|
||||
name: "cluster with empty version spec and assigned hostVersion status",
|
||||
args: args{
|
||||
k3sImage: "rancher/k3s",
|
||||
cluster: &v1alpha1.Cluster{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "mycluster",
|
||||
Namespace: "ns-1",
|
||||
},
|
||||
Status: v1alpha1.ClusterStatus{
|
||||
HostVersion: "v4.5.6",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedData: "rancher/k3s:v4.5.6",
|
||||
},
|
||||
{
|
||||
name: "cluster with empty version spec and empty hostVersion status",
|
||||
args: args{
|
||||
k3sImage: "rancher/k3s",
|
||||
cluster: &v1alpha1.Cluster{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "mycluster",
|
||||
Namespace: "ns-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedData: "rancher/k3s:latest",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fullImage := K3SImage(tt.args.cluster, tt.args.k3sImage)
|
||||
assert.Equal(t, tt.expectedData, fullImage)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
@@ -195,8 +196,12 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
|
||||
// Check baseline
|
||||
|
||||
// get policy again
|
||||
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(policy), policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
policy.Spec.PodSecurityAdmissionLevel = &baseline
|
||||
err := k8sClient.Update(ctx, policy)
|
||||
err = k8sClient.Update(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// wait a bit for the namespace to be updated
|
||||
@@ -486,8 +491,11 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
|
||||
WithPolling(time.Second).
|
||||
Should(BeNil())
|
||||
|
||||
// get policy again
|
||||
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(policy), policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
policy.Spec.Quota = nil
|
||||
err := k8sClient.Update(ctx, policy)
|
||||
err = k8sClient.Update(ctx, policy)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
// wait for a bit for the resourceQuota to be deleted
|
||||
|
||||
tests/cluster_certs_test.go (new file)
@@ -0,0 +1,113 @@
|
||||
package k3k_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx := context.Background()
|
||||
|
||||
namespace := NewNamespace()
|
||||
|
||||
// create custom cert secret
|
||||
customCertDir := "testdata/customcerts/"
|
||||
|
||||
certList := []string{
|
||||
"server-ca",
|
||||
"client-ca",
|
||||
"request-header-ca",
|
||||
"service",
|
||||
"etcd-peer-ca",
|
||||
"etcd-server-ca",
|
||||
}
|
||||
|
||||
for _, certName := range certList {
|
||||
var cert, key []byte
|
||||
var err error
|
||||
filePathPrefix := ""
|
||||
certfile := certName
|
||||
if strings.HasPrefix(certName, "etcd") {
|
||||
filePathPrefix = "etcd/"
|
||||
certfile = strings.TrimPrefix(certName, "etcd-")
|
||||
}
|
||||
if !strings.Contains(certName, "service") {
|
||||
cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
}
|
||||
key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
certSecret := caCertSecret(certName, namespace.Name, cert, key)
|
||||
err = k8sClient.Create(ctx, certSecret)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
}
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
|
||||
cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
|
||||
Enabled: true,
|
||||
Sources: v1alpha1.CredentialSources{
|
||||
ServerCA: v1alpha1.CredentialSource{
|
||||
SecretName: "server-ca",
|
||||
},
|
||||
ClientCA: v1alpha1.CredentialSource{
|
||||
SecretName: "client-ca",
|
||||
},
|
||||
ETCDServerCA: v1alpha1.CredentialSource{
|
||||
SecretName: "etcd-server-ca",
|
||||
},
|
||||
ETCDPeerCA: v1alpha1.CredentialSource{
|
||||
SecretName: "etcd-peer-ca",
|
||||
},
|
||||
RequestHeaderCA: v1alpha1.CredentialSource{
|
||||
SecretName: "request-header-ca",
|
||||
},
|
||||
ServiceAccountToken: v1alpha1.CredentialSource{
|
||||
SecretName: "service",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
})
|
||||
|
||||
It("will load the custom certs in the server pod", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
|
||||
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
serverPod := serverPods.Items[0]
|
||||
|
||||
// check server-ca.crt
|
||||
serverCACrtPath := "/var/lib/rancher/k3s/server/tls/server-ca.crt"
|
||||
serverCACrt, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, serverCACrtPath)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
serverCACrtTestFile, err := os.ReadFile("testdata/customcerts/server-ca.crt")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(serverCACrt).To(Equal(serverCACrtTestFile))
|
||||
})
|
||||
})
|
||||
tests/cluster_persistence_test.go (new file)
@@ -0,0 +1,124 @@
|
||||
package k3k_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("an ephemeral cluster is installed", Label("e2e"), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
virtualCluster = NewVirtualCluster()
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
DeleteNamespaces(virtualCluster.Cluster.Namespace)
|
||||
})
|
||||
|
||||
It("can create a nginx pod", func() {
|
||||
_, _ = virtualCluster.NewNginxPod("")
|
||||
})
|
||||
|
||||
It("regenerates the bootstrap secret after a restart", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
_, err := virtualCluster.Client.ServerVersion()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
|
||||
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
serverPod := serverPods.Items[0]
|
||||
|
||||
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
|
||||
|
||||
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
By("Deleting server pod")
|
||||
|
||||
// check that the server pods restarted
|
||||
Eventually(func() any {
|
||||
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
return serverPods.Items[0].DeletionTimestamp
|
||||
}).
|
||||
WithTimeout(time.Minute).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeNil())
|
||||
|
||||
By("Server pod up and running again")
|
||||
|
||||
By("Using old k8s client configuration should fail")
|
||||
|
||||
Eventually(func() bool {
|
||||
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
|
||||
var unknownAuthorityErr x509.UnknownAuthorityError
|
||||
return errors.As(err, &unknownAuthorityErr)
|
||||
}).
|
||||
WithTimeout(time.Minute * 2).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeTrue())
|
||||
|
||||
By("Recover new config should succeed")
|
||||
|
||||
Eventually(func() error {
|
||||
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
|
||||
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
|
||||
return err
|
||||
}).
|
||||
WithTimeout(time.Minute * 2).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a dynamic cluster is installed", Label("e2e"), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
virtualCluster = NewVirtualClusterWithType(v1alpha1.DynamicPersistenceMode)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
DeleteNamespaces(virtualCluster.Cluster.Namespace)
|
||||
})
|
||||
|
||||
It("can create a nginx pod", func() {
|
||||
_, _ = virtualCluster.NewNginxPod("")
|
||||
})
|
||||
|
||||
It("uses the same bootstrap secret after a restart", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
_, err := virtualCluster.Client.ServerVersion()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
restartServerPod(ctx, virtualCluster)
|
||||
|
||||
By("Server pod up and running again")
|
||||
|
||||
By("Using old k8s client configuration should succeed")
|
||||
|
||||
Eventually(func() error {
|
||||
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
|
||||
return err
|
||||
}).
|
||||
WithTimeout(2 * time.Minute).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeNil())
|
||||
})
|
||||
})
|
||||
@@ -1,277 +0,0 @@
|
||||
package k3k_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("k3k is installed", Label("e2e"), func() {
|
||||
It("is in Running status", func() {
|
||||
// check that the controller is running
|
||||
Eventually(func() bool {
|
||||
opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
|
||||
podList, err := k8s.CoreV1().Pods("k3k-system").List(context.Background(), opts)
|
||||
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(podList.Items).To(Not(BeEmpty()))
|
||||
|
||||
var isRunning bool
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Status.Phase == corev1.PodRunning {
|
||||
isRunning = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return isRunning
|
||||
}).
|
||||
WithTimeout(time.Second * 10).
|
||||
WithPolling(time.Second).
|
||||
Should(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a ephemeral cluster is installed", Label("e2e"), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
virtualCluster = NewVirtualCluster()
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
DeleteNamespaces(virtualCluster.Cluster.Namespace)
|
||||
})
|
||||
|
||||
It("can create a nginx pod", func() {
|
||||
_, _ = virtualCluster.NewNginxPod("")
|
||||
})
|
||||
|
||||
It("regenerates the bootstrap secret after a restart", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
_, err := virtualCluster.Client.ServerVersion()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
|
||||
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
serverPod := serverPods.Items[0]
|
||||
|
||||
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
|
||||
|
||||
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
By("Deleting server pod")
|
||||
|
||||
// check that the server pods restarted
|
||||
Eventually(func() any {
|
||||
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
return serverPods.Items[0].DeletionTimestamp
|
||||
}).
|
||||
WithTimeout(time.Minute).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeNil())
|
||||
|
||||
By("Server pod up and running again")
|
||||
|
||||
By("Using old k8s client configuration should fail")
|
||||
|
||||
Eventually(func() bool {
|
||||
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
|
||||
var unknownAuthorityErr x509.UnknownAuthorityError
|
||||
return errors.As(err, &unknownAuthorityErr)
|
||||
}).
|
||||
WithTimeout(time.Minute * 2).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeTrue())
|
||||
|
||||
By("Recover new config should succeed")
|
||||
|
||||
Eventually(func() error {
|
||||
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
|
||||
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
|
||||
return err
|
||||
}).
|
||||
WithTimeout(time.Minute * 2).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a dynamic cluster is installed", func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
namespace := NewNamespace()
|
||||
cluster := NewCluster(namespace.Name)
|
||||
cluster.Spec.Persistence.Type = v1alpha1.DynamicPersistenceMode
|
||||
CreateCluster(cluster)
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
DeleteNamespaces(virtualCluster.Cluster.Namespace)
|
||||
})
|
||||
|
||||
It("can create a nginx pod", func() {
|
||||
_, _ = virtualCluster.NewNginxPod("")
|
||||
})
|
||||
|
||||
It("use the same bootstrap secret after a restart", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
_, err := virtualCluster.Client.ServerVersion()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
|
||||
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
serverPod := serverPods.Items[0]
|
||||
|
||||
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
|
||||
|
||||
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
By("Deleting server pod")
|
||||
|
||||
// check that the server pods restarted
|
||||
Eventually(func() any {
|
||||
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
return serverPods.Items[0].DeletionTimestamp
|
||||
}).
|
||||
WithTimeout(60 * time.Second).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeNil())
|
||||
|
||||
By("Server pod up and running again")
|
||||
|
||||
By("Using old k8s client configuration should succeed")
|
||||
|
||||
Eventually(func() error {
|
||||
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
|
||||
return err
|
||||
}).
|
||||
WithTimeout(2 * time.Minute).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), func() {
|
||||
ctx := context.Background()
|
||||
var virtualCluster *VirtualCluster
|
||||
BeforeEach(func() {
|
||||
namespace := NewNamespace()
|
||||
// create custom cert secret
|
||||
customCertDir := "testdata/customcerts/"
|
||||
certList := []string{
|
||||
"server-ca",
|
||||
"client-ca",
|
||||
"request-header-ca",
|
||||
"service",
|
||||
"etcd-peer-ca",
|
||||
"etcd-server-ca",
|
||||
}
|
||||
for _, certName := range certList {
|
||||
var cert, key []byte
|
||||
var err error
|
||||
filePathPrefix := ""
|
||||
certfile := certName
|
||||
if strings.HasPrefix(certName, "etcd") {
|
||||
filePathPrefix = "etcd/"
|
||||
certfile = strings.TrimPrefix(certName, "etcd-")
|
||||
}
|
||||
if !strings.Contains(certName, "service") {
|
||||
cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
}
|
||||
key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
certSecret := caCertSecret(certName, namespace.Name, cert, key)
|
||||
err = k8sClient.Create(ctx, certSecret)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
}
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
|
||||
Enabled: true,
|
||||
Sources: v1alpha1.CredentialSources{
|
||||
ServerCA: v1alpha1.CredentialSource{
|
||||
SecretName: "server-ca",
|
||||
},
|
||||
ClientCA: v1alpha1.CredentialSource{
|
||||
SecretName: "client-ca",
|
||||
},
|
||||
ETCDServerCA: v1alpha1.CredentialSource{
|
||||
SecretName: "etcd-server-ca",
|
||||
},
|
||||
ETCDPeerCA: v1alpha1.CredentialSource{
|
||||
SecretName: "etcd-peer-ca",
|
||||
},
|
||||
RequestHeaderCA: v1alpha1.CredentialSource{
|
||||
SecretName: "request-header-ca",
|
||||
},
|
||||
ServiceAccountToken: v1alpha1.CredentialSource{
|
||||
SecretName: "service",
|
||||
},
|
||||
},
|
||||
}
|
||||
CreateCluster(cluster)
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
|
||||
virtualCluster = &VirtualCluster{
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
}
|
||||
})
|
||||
It("will load the custom certs in the server pod", func() {
|
||||
_, _ = virtualCluster.NewNginxPod("")
|
||||
|
||||
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
|
||||
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
serverPod := serverPods.Items[0]
|
||||
|
||||
// check server-ca.crt
|
||||
serverCACrtPath := "/var/lib/rancher/k3s/server/tls/server-ca.crt"
|
||||
serverCACrt, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, serverCACrtPath)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
serverCACrtTestFile, err := os.ReadFile("testdata/customcerts/server-ca.crt")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
Expect(serverCACrt).To(Equal(serverCACrtTestFile))
|
||||
})
|
||||
})
|
||||
@@ -34,13 +34,20 @@ type VirtualCluster struct {
|
||||
Client *kubernetes.Clientset
|
||||
}
|
||||
|
||||
func NewVirtualCluster() *VirtualCluster {
|
||||
func NewVirtualCluster() *VirtualCluster { // By default, create an ephemeral cluster
|
||||
return NewVirtualClusterWithType(v1alpha1.EphemeralPersistenceMode)
|
||||
}
|
||||
|
||||
func NewVirtualClusterWithType(persistenceType v1alpha1.PersistenceMode) *VirtualCluster {
|
||||
GinkgoHelper()
|
||||
|
||||
namespace := NewNamespace()
|
||||
|
||||
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", namespace.Name))
|
||||
|
||||
cluster := NewCluster(namespace.Name)
|
||||
cluster.Spec.Persistence.Type = persistenceType
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
@@ -151,12 +158,9 @@ func CreateCluster(cluster *v1alpha1.Cluster) {
|
||||
|
||||
for _, pod := range podList.Items {
|
||||
imageName := pod.Spec.Containers[0].Image
|
||||
imageName = strings.Split(imageName, ":")[0] // remove tag
|
||||
|
||||
switch imageName {
|
||||
case "rancher/k3s":
|
||||
if strings.Contains(imageName, "rancher/k3s") {
|
||||
serverRunning = pod.Status.Phase == corev1.PodRunning
|
||||
case "rancher/k3k-kubelet":
|
||||
} else if strings.Contains(imageName, "rancher/k3k-kubelet") {
|
||||
kubeletRunning = pod.Status.Phase == corev1.PodRunning
|
||||
}
|
||||
|
||||
@@ -302,3 +306,29 @@ func (c *VirtualCluster) ExecCmd(pod *corev1.Pod, command string) (string, strin
|
||||
|
||||
return stdout.String(), stderr.String(), err
|
||||
}
|
||||
|
||||
func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
|
||||
GinkgoHelper()
|
||||
|
||||
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
|
||||
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
serverPod := serverPods.Items[0]
|
||||
|
||||
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
|
||||
|
||||
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
By("Deleting server pod")
|
||||
|
||||
// check that the server pods restarted
|
||||
Eventually(func() any {
|
||||
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(len(serverPods.Items)).To(Equal(1))
|
||||
return serverPods.Items[0].DeletionTimestamp
|
||||
}).WithTimeout(60 * time.Second).WithPolling(time.Second * 5).Should(BeNil())
|
||||
}
|
||||
|
||||
tests/installation_test.go (new file)
@@ -0,0 +1,35 @@
|
||||
package k3k_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("k3k is installed", Label("e2e"), func() {
|
||||
It("is in Running status", func() {
|
||||
// check that the controller is running
|
||||
Eventually(func() bool {
|
||||
opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
|
||||
podList, err := k8s.CoreV1().Pods("k3k-system").List(context.Background(), opts)
|
||||
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
Expect(podList.Items).To(Not(BeEmpty()))
|
||||
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Status.Phase == corev1.PodRunning {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}).
|
||||
WithTimeout(time.Second * 10).
|
||||
WithPolling(time.Second).
|
||||
Should(BeTrue())
|
||||
})
|
||||
})
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"io"
|
||||
"maps"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -17,7 +18,9 @@ import (
|
||||
"go.uber.org/zap"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
"helm.sh/helm/v3/pkg/chart/loader"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
@@ -34,6 +37,11 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
k3kNamespace = "k3k-system"
|
||||
k3kName = "k3k"
|
||||
)
|
||||
|
||||
func TestTests(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "Tests Suite")
|
||||
@@ -79,7 +87,9 @@ var _ = BeforeSuite(func() {
|
||||
DeferCleanup(os.Remove, kubeconfigPath)
|
||||
|
||||
initKubernetesClient(kubeconfig)
|
||||
installK3kChart(kubeconfig)
|
||||
installK3kChart(ctx, kubeconfig)
|
||||
|
||||
patchPVC(ctx, k8s)
|
||||
})
|
||||
|
||||
func initKubernetesClient(kubeconfig []byte) {
|
||||
@@ -100,81 +110,6 @@ func initKubernetesClient(kubeconfig []byte) {
|
||||
log.SetLogger(zapr.NewLogger(logger))
|
||||
}
|
||||
|
||||
func installK3kChart(kubeconfig []byte) {
|
||||
pwd, err := os.Getwd()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
k3kChart, err := loader.Load(path.Join(pwd, "../charts/k3k"))
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
actionConfig := new(action.Configuration)
|
||||
|
||||
restClientGetter, err := NewRESTClientGetter(kubeconfig)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
releaseName := "k3k"
|
||||
releaseNamespace := "k3k-system"
|
||||
|
||||
err = actionConfig.Init(restClientGetter, releaseNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
|
||||
GinkgoWriter.Printf("helm debug: "+format+"\n", v...)
|
||||
})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
iCli := action.NewInstall(actionConfig)
|
||||
iCli.ReleaseName = releaseName
|
||||
iCli.Namespace = releaseNamespace
|
||||
iCli.CreateNamespace = true
|
||||
iCli.Timeout = time.Minute
|
||||
iCli.Wait = true
|
||||
|
||||
imageMap, _ := k3kChart.Values["image"].(map[string]any)
|
||||
maps.Copy(imageMap, map[string]any{
|
||||
"repository": "rancher/k3k",
|
||||
"tag": "dev",
|
||||
"pullPolicy": "IfNotPresent",
|
||||
})
|
||||
|
||||
sharedAgentMap, _ := k3kChart.Values["sharedAgent"].(map[string]any)
|
||||
sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
|
||||
maps.Copy(sharedAgentImageMap, map[string]any{
|
||||
"repository": "rancher/k3k-kubelet",
|
||||
"tag": "dev",
|
||||
})
|
||||
|
||||
err = k3sContainer.LoadImages(context.Background(), "rancher/k3k:dev", "rancher/k3k-kubelet:dev")
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
release, err := iCli.Run(k3kChart, k3kChart.Values)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
GinkgoWriter.Printf("Release %s installed in %s namespace\n", release.Name, release.Namespace)
|
||||
}
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
// dump k3s logs
|
||||
readCloser, err := k3sContainer.Logs(context.Background())
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
logs, err := io.ReadAll(readCloser)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
logfile := path.Join(os.TempDir(), "k3s.log")
|
||||
err = os.WriteFile(logfile, logs, 0o644)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
GinkgoWriter.Println("k3s logs written to: " + logfile)
|
||||
|
||||
// dump k3k controller logs
|
||||
readCloser, err = k3sContainer.Logs(context.Background())
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
writeLogs("k3s.log", readCloser)
|
||||
|
||||
// dump k3k logs
|
||||
writeK3kLogs()
|
||||
|
||||
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
|
||||
})
|
||||
|
||||
func buildScheme() *runtime.Scheme {
|
||||
scheme := runtime.NewScheme()
|
||||
|
||||
@@ -186,21 +121,231 @@ func buildScheme() *runtime.Scheme {
|
||||
return scheme
|
||||
}
|
||||
|
||||
func writeK3kLogs() {
|
||||
var (
|
||||
err error
|
||||
podList corev1.PodList
|
||||
)
|
||||
func installK3kChart(ctx context.Context, kubeconfig []byte) {
    pwd, err := os.Getwd()
    Expect(err).To(Not(HaveOccurred()))

    k3kChart, err := loader.Load(path.Join(pwd, "../charts/k3k"))
    Expect(err).To(Not(HaveOccurred()))

    actionConfig := new(action.Configuration)

    restClientGetter, err := NewRESTClientGetter(kubeconfig)
    Expect(err).To(Not(HaveOccurred()))

    err = actionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
        GinkgoWriter.Printf("helm debug: "+format+"\n", v...)
    })
    Expect(err).To(Not(HaveOccurred()))

    iCli := action.NewInstall(actionConfig)
    iCli.ReleaseName = k3kName
    iCli.Namespace = k3kNamespace
    iCli.CreateNamespace = true
    iCli.Timeout = time.Minute
    iCli.Wait = true

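    // Override the controller and shared-agent image values so the chart deploys the dev images loaded below.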
    controllerMap, _ := k3kChart.Values["controller"].(map[string]any)
    imageMap, _ := controllerMap["image"].(map[string]any)
    maps.Copy(imageMap, map[string]any{
        "repository": "rancher/k3k",
        "tag": "dev",
        "pullPolicy": "IfNotPresent",
    })

    agentMap, _ := k3kChart.Values["agent"].(map[string]any)
    sharedAgentMap, _ := agentMap["shared"].(map[string]any)
    sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
    maps.Copy(sharedAgentImageMap, map[string]any{
        "repository": "rancher/k3k-kubelet",
        "tag": "dev",
    })

    err = k3sContainer.LoadImages(ctx, "rancher/k3k:dev", "rancher/k3k-kubelet:dev")
    Expect(err).To(Not(HaveOccurred()))

    release, err := iCli.Run(k3kChart, k3kChart.Values)
    Expect(err).To(Not(HaveOccurred()))

    GinkgoWriter.Printf("Release %s installed in %s namespace\n", release.Name, release.Namespace)
}

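// patchPVC creates the coverage-data-pvc PersistentVolumeClaim and patches the k3k controller
// Deployment to mount it at /tmp/covdata with GOCOVERDIR pointing there, then waits for the
// rollout to complete.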
func patchPVC(ctx context.Context, clientset *kubernetes.Clientset) {
    pvc := &corev1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            Name: "coverage-data-pvc",
            Namespace: k3kNamespace,
        },
        Spec: corev1.PersistentVolumeClaimSpec{
            AccessModes: []corev1.PersistentVolumeAccessMode{
                corev1.ReadWriteOnce,
            },
            Resources: corev1.VolumeResourceRequirements{
                Requests: corev1.ResourceList{
                    corev1.ResourceStorage: resource.MustParse("100M"),
                },
            },
        },
    }

    _, err := clientset.CoreV1().PersistentVolumeClaims(k3kNamespace).Create(ctx, pvc, metav1.CreateOptions{})
    Expect(err).To(Not(HaveOccurred()))

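    // Strategic merge patch: add a volume backed by coverage-data-pvc, mount it in the k3k
    // container at /tmp/covdata and point GOCOVERDIR at it.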
    patchData := []byte(`
{
    "spec": {
        "template": {
            "spec": {
                "volumes": [
                    {
                        "name": "tmp-covdata",
                        "persistentVolumeClaim": {
                            "claimName": "coverage-data-pvc"
                        }
                    }
                ],
                "containers": [
                    {
                        "name": "k3k",
                        "volumeMounts": [
                            {
                                "name": "tmp-covdata",
                                "mountPath": "/tmp/covdata"
                            }
                        ],
                        "env": [
                            {
                                "name": "GOCOVERDIR",
                                "value": "/tmp/covdata"
                            }
                        ]
                    }
                ]
            }
        }
    }
}`)

    GinkgoWriter.Printf("Applying patch to deployment '%s' in namespace '%s'...\n", k3kName, k3kNamespace)

    _, err = clientset.AppsV1().Deployments(k3kNamespace).Patch(
        ctx,
        k3kName,
        types.StrategicMergePatchType,
        patchData,
        metav1.PatchOptions{},
    )
    Expect(err).To(Not(HaveOccurred()))

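    // Wait for the patched Deployment to finish rolling out: latest generation observed,
    // all replicas updated and available.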
    Eventually(func() bool {
        GinkgoWriter.Println("Checking K3k deployment status")

        dep, err := clientset.AppsV1().Deployments(k3kNamespace).Get(ctx, k3kName, metav1.GetOptions{})
        Expect(err).To(Not(HaveOccurred()))

        // 1. Check if the controller has observed the latest generation
        if dep.Generation > dep.Status.ObservedGeneration {
            GinkgoWriter.Printf("K3k deployment generation: %d, observed generation: %d\n", dep.Generation, dep.Status.ObservedGeneration)
            return false
        }

        // 2. Check if all replicas have been updated
        if dep.Spec.Replicas != nil && dep.Status.UpdatedReplicas < *dep.Spec.Replicas {
            GinkgoWriter.Printf("K3k deployment replicas: %d, updated replicas: %d\n", *dep.Spec.Replicas, dep.Status.UpdatedReplicas)
            return false
        }

        // 3. Check if all updated replicas are available
        if dep.Status.AvailableReplicas < dep.Status.UpdatedReplicas {
            GinkgoWriter.Printf("K3k deployment available replicas: %d, updated replicas: %d\n", dep.Status.AvailableReplicas, dep.Status.UpdatedReplicas)
            return false
        }

        return true
    }).
        MustPassRepeatedly(5).
        WithPolling(time.Second).
        WithTimeout(time.Second * 30).
        Should(BeTrue())
}

var _ = AfterSuite(func() {
    ctx := context.Background()
    err = k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: "k3k-system"})

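    // Dump the controller coverage data into GOCOVERDIR, falling back to a temporary directory
    // when the variable is not set.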
    goCoverDir := os.Getenv("GOCOVERDIR")
    if goCoverDir == "" {
        goCoverDir = path.Join(os.TempDir(), "covdata")
        Expect(os.MkdirAll(goCoverDir, 0o755)).To(Succeed())
    }

    dumpK3kCoverageData(ctx, goCoverDir)

    // dump k3s logs
    k3sLogs, err := k3sContainer.Logs(ctx)
    Expect(err).To(Not(HaveOccurred()))
    writeLogs("k3s.log", k3sLogs)

    // dump k3k controller logs
    k3kLogs := getK3kLogs(ctx)
    writeLogs("k3k.log", k3kLogs)

    testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
})

// dumpK3kCoverageData will kill the K3k controller container to force it to dump the coverage data.
// It will then download the files with kubectl cp into the specified folder. If the folder doesn't exist, it will be created.
func dumpK3kCoverageData(ctx context.Context, folder string) {
    GinkgoWriter.Println("Restarting k3k controller...")

    var podList corev1.PodList

    err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
    Expect(err).To(Not(HaveOccurred()))

    k3kPod := podList.Items[0]
    req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &corev1.PodLogOptions{})

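    // kill 1 sends SIGTERM to the controller process, so the instrumented binary exits and
    // flushes its coverage data to GOCOVERDIR; the container is then restarted and the pod
    // returns to Running.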
    cmd := exec.Command("kubectl", "exec", "-n", k3kNamespace, k3kPod.Name, "-c", "k3k", "--", "kill", "1")
    output, err := cmd.CombinedOutput()
    Expect(err).NotTo(HaveOccurred(), string(output))

    Eventually(func() corev1.PodPhase {
        var pod corev1.Pod

        key := types.NamespacedName{
            Namespace: k3kNamespace,
            Name: k3kPod.Name,
        }
        err = k8sClient.Get(ctx, key, &pod)
        Expect(err).To(Not(HaveOccurred()))

        GinkgoWriter.Printf("K3k controller status: %s\n", pod.Status.Phase)

        return pod.Status.Phase
    }).
        MustPassRepeatedly(5).
        WithPolling(time.Second * 2).
        WithTimeout(time.Minute).
        Should(Equal(corev1.PodRunning))

    GinkgoWriter.Printf("Downloading covdata from k3k controller to %s\n", folder)

    cmd = exec.Command("kubectl", "cp", fmt.Sprintf("%s/%s:/tmp/covdata", k3kNamespace, k3kPod.Name), folder)
    output, err = cmd.CombinedOutput()
    Expect(err).NotTo(HaveOccurred(), string(output))
}

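// getK3kLogs streams the logs of the previous k3k controller container (the instance killed to
// dump coverage data) and writes them to k3k.log before returning the stream.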
func getK3kLogs(ctx context.Context) io.ReadCloser {
    var podList corev1.PodList

    err := k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: k3kNamespace})
    Expect(err).To(Not(HaveOccurred()))

    k3kPod := podList.Items[0]
    req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &corev1.PodLogOptions{Previous: true})
    podLogs, err := req.Stream(ctx)
    Expect(err).To(Not(HaveOccurred()))
    writeLogs("k3k.log", podLogs)

    return podLogs
}

func writeLogs(filename string, logs io.ReadCloser) {
@@ -223,7 +368,7 @@ func readFileWithinPod(ctx context.Context, client *kubernetes.Clientset, config

    stderr, err := podExec(ctx, client, config, namespace, name, command, nil, output)
    if err != nil || len(stderr) > 0 {
        return nil, fmt.Errorf("faile to read the following file %s: %v", path, err)
        return nil, fmt.Errorf("failed to read the following file %s: %v", path, err)
    }

    return output.Bytes(), nil