Compare commits

...

17 Commits

Author SHA1 Message Date
Enrico Candino
2206632dcc bump charts (#507) 2025-10-14 15:19:00 +02:00
Enrico Candino
8ffdc9bafd renaming webhook (#506) 2025-10-13 17:25:17 +02:00
Enrico Candino
594c2571c3 promoted v1alpha1 resources to v1beta1 (#505) 2025-10-13 17:24:56 +02:00
Hussein Galal
12971f55a6 Add k8s version upgrade test (#503)
* Add k8s version upgrade test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove unused functions

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-13 17:14:25 +03:00
Enrico Candino
99f750525f Fix extraEnv and other Helm values (#500)
* fix for extraEnv

* moved env var to flags

* changed resources as object

* renamed replicaCount to replicas

* cleanup spaces

* moved some values and spacing

* renamed some flags
2025-10-13 12:50:07 +02:00
Hussein Galal
a0fd472841 Use K3S host cluster for E2E tests (#492)
* Add kubeconfig to e2e_tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add E2E_KUBECONFIG env variable

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix yaml permissions for kubeconfig

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix image name and use ttl.sh

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add uuidgen result to a file

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add hostIP

* Add k3s version to e2e test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove comment

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove virtual mode tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix failed test

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add KUBECONFIG env variable to the make install

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* add k3kcli to github_path

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Use docker installation for testing the cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fix test cli

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* lint

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* typo

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-08 15:39:35 +03:00
Enrico Candino
7387fc1b23 Fix Service reconciliation error loop (#497)
* fix service reconciliation error by adding checks for virtual service annotations

* renamed var
2025-10-08 14:03:50 +02:00
Enrico Candino
9f265c73d9 Fix for HA server deletion (#493)
* wip

* wip

* wip

* removed todo
2025-10-08 13:23:15 +02:00
Enrico Candino
00ef6d582c Add log-format, and cleanup (#494)
* using logr.Logger

* testing levels

* adding log format

* fix lint

* removed tests

* final cleanup
2025-10-08 13:19:57 +02:00
Enrico Candino
5c95ca3dfa Fix for pod eviction in host cluster (#484)
* update statefulset controller

* fix for single pod

* adding pod controller

* added test

* removed comment

* merged service controller

* revert statefulset

* added test

* added common owner filter
2025-10-03 16:22:54 +02:00
jpgouin
6523b8339b change the default storage request size request to 2Gi (#490)
* change the default storage request size request to 2Gi
2025-10-03 09:04:13 +02:00
Hussein Galal
80037e815f Adding upgrade path tests (#481)
* Adding upgrade path tests

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Remove update label

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-02 14:53:08 +03:00
Enrico Candino
7585611792 Rename PodController to StatefulSetController (#482)
* renamed pod.go

* update statefulset controller

* fix for single pod

* added test, revert finalizer

* wip ha deletion

* revert logic

* remove focus
2025-10-01 17:06:24 +02:00
Hussein Galal
0bd681ab60 Lb service status sync (#451)
* Sync service LB status back to virtual service

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Sync service LB status back to virtual service

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* wsl

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-10-01 13:25:31 +03:00
Hussein Galal
4fe36b3d0c Bump Chart to v0.3.5 (#485)
* Bump Chart to v0.3.5

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Bump Chart to v0.3.5

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-30 15:26:59 +03:00
Enrico Candino
01589bb359 splitting tests (#461) 2025-09-23 12:07:49 +02:00
Hussein Galal
30217df268 Bump chart to v0.3.5-rc1 (#467)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-09-17 12:14:44 +03:00
90 changed files with 2260 additions and 1119 deletions

View File

@@ -106,7 +106,7 @@ jobs:
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster
@@ -259,7 +259,7 @@ jobs:
kubectl create namespace k3k-mycluster
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster

View File

@@ -80,17 +80,30 @@ jobs:
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Set coverage environment
- name: Setup environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "REPO=ttl.sh/$(uuidgen)" >> $GITHUB_ENV
echo "VERSION=1h" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1" >> $GITHUB_ENV
- name: Build and package
- name: Install k3s
run: |
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${{ env.K3S_HOST_VERSION }} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s -
- name: Build and package and push dev images
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: |
make build
make package
make push
make install
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
@@ -99,7 +112,11 @@ jobs:
run: k3kcli -v
- name: Run e2e tests
run: make test-e2e
env:
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
REPO: ${{ env.REPO }}
VERSION: ${{ env.VERSION }}
run: make test-e2e
- name: Convert coverage data
run: go tool covdata textfmt -i=${GOCOVERDIR} -o ${GOCOVERDIR}/cover.out
@@ -150,12 +167,13 @@ jobs:
- name: Install Ginkgo
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Set coverage environment
- name: Setup environment
run: |
mkdir ${{ github.workspace }}/covdata
echo "COVERAGE=true" >> $GITHUB_ENV
echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
echo "K3S_HOST_VERSION=v1.32.1+k3s1" >> $GITHUB_ENV
- name: Build and package
run: |
@@ -169,6 +187,9 @@ jobs:
run: k3kcli -v
- name: Run cli tests
env:
K3K_DOCKER_INSTALL: "true"
K3S_HOST_VERSION: "${{ env.K3S_HOST_VERSION }}"
run: make test-cli
- name: Convert coverage data

View File

@@ -83,7 +83,7 @@ generate: ## Generate the CRDs specs
docs: ## Build the CRDs and CLI docs
$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
--renderer=markdown \
--source-path=./pkg/apis/k3k.io/v1alpha1 \
--source-path=./pkg/apis/k3k.io/v1beta1 \
--output-path=./docs/crds/crd-docs.md
@go run ./docs/cli/genclidoc.go

View File

@@ -71,7 +71,7 @@ To install it, simply download the latest available version for your architectur
For example, you can download the Linux amd64 version with:
```
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.4/k3kcli-linux-amd64 && \
wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.5/k3kcli-linux-amd64 && \
chmod +x k3kcli && \
sudo mv k3kcli /usr/local/bin
```
@@ -79,7 +79,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v0.3.4/k3kcli-l
You should now be able to run:
```bash
-> % k3kcli --version
k3kcli version v0.3.4
k3kcli version v0.3.5
```
@@ -135,7 +135,7 @@ You can also directly create a Cluster resource in some namespace, to create a K
```bash
kubectl apply -f - <<EOF
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.3.4
appVersion: v0.3.4
version: 1.0.0-rc1
appVersion: v1.0.0-rc1

View File

@@ -24,7 +24,7 @@ spec:
- jsonPath: .status.policyName
name: Policy
type: string
name: v1alpha1
name: v1beta1
schema:
openAPIV3Schema:
description: |-
@@ -410,7 +410,7 @@ spec:
This field is only relevant in "dynamic" mode.
type: string
storageRequestSize:
default: 1G
default: 2G
description: |-
StorageRequestSize is the requested size for the PVC.
This field is only relevant in "dynamic" mode.

View File

@@ -20,7 +20,7 @@ spec:
- jsonPath: .spec.allowedMode
name: Mode
type: string
name: v1alpha1
name: v1beta1
schema:
openAPIV3Schema:
description: |-

View File

@@ -6,7 +6,7 @@ metadata:
{{- include "k3k.labels" . | nindent 4 }}
namespace: {{ .Release.Namespace }}
spec:
replicas: {{ .Values.controller.replicaCount }}
replicas: {{ .Values.controller.replicas }}
selector:
matchLabels:
{{- include "k3k.selectorLabels" . | nindent 6 }}
@@ -20,51 +20,35 @@ spec:
- image: "{{- include "controller.registry" .}}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
name: {{ .Chart.Name }}
{{- with .Values.controller.resources }}
resources:
requests:
cpu: {{ .Values.controller.resources.requests.cpu }}
memory: {{ .Values.controller.resources.requests.memory }}
limits:
{{ if .Values.controller.resources.limits.cpu }}
cpu: {{ .Values.controller.resources.limits.cpu }}
{{ end }}
{{ if .Values.controller.resources.limits.memory }}
memory: {{ .Values.controller.resources.limits.memory }}
{{ end}}
{{- toYaml . | nindent 12 }}
{{- end }}
args:
- k3k
- --cluster-cidr={{ .Values.host.clusterCIDR }}
- --k3s-server-image={{- include "server.registry" .}}{{ .Values.server.image.repository }}
- --k3s-server-image-pull-policy={{ .Values.server.image.pullPolicy }}
- --agent-shared-image={{- include "agent.shared.registry" .}}{{ .Values.agent.shared.image.repository }}:{{ default .Chart.AppVersion .Values.agent.shared.image.tag }}
- --agent-shared-image-pull-policy={{ .Values.agent.shared.image.pullPolicy }}
- --agent-virtual-image={{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }}
- --agent-virtual-image-pull-policy={{ .Values.agent.virtual.image.pullPolicy }}
- --kubelet-port-range={{ .Values.agent.shared.kubeletPortRange }}
- --webhook-port-range={{ .Values.agent.shared.webhookPortRange }}
{{- range $key, $value := include "image.pullSecrets" (concat .Values.agent.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
- --agent-image-pull-secret
- --agent-image-pull-secret
- {{ .name }}
{{- end }}
{{- range $key, $value := include "image.pullSecrets" (concat .Values.server.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }}
- --server-image-pull-secret
- --server-image-pull-secret
- {{ .name }}
{{- end }}
env:
- name: CLUSTER_CIDR
value: {{ .Values.host.clusterCIDR }}
- name: SHARED_AGENT_IMAGE
value: "{{- include "agent.shared.registry" .}}{{ .Values.agent.shared.image.repository }}:{{ default .Chart.AppVersion .Values.agent.shared.image.tag }}"
- name: SHARED_AGENT_IMAGE_PULL_POLICY
value: {{ .Values.agent.shared.image.pullPolicy }}
- name: VIRTUAL_AGENT_IMAGE
value: "{{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }}"
- name: VIRTUAL_AGENT_IMAGE_PULL_POLICY
value: {{ .Values.agent.virtual.image.pullPolicy }}
- name: K3S_SERVER_IMAGE
value: "{{- include "server.registry" .}}{{ .Values.server.image.repository }}"
- name: K3S_SERVER_IMAGE_PULL_POLICY
value: {{ .Values.server.image.pullPolicy }}
- name: KUBELET_PORT_RANGE
value: {{ .Values.agent.shared.kubeletPortRange }}
- name: WEBHOOK_PORT_RANGE
value: {{ .Values.agent.shared.webhookPortRange }}
- name: CONTROLLER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.extraEnv }}
{{- with .Values.controller.extraEnv }}
{{- toYaml . | nindent 10 }}
{{- end }}
ports:

View File

@@ -20,13 +20,15 @@ host:
clusterCIDR: ""
controller:
replicaCount: 1
replicas: 1
image:
registry: ""
repository: rancher/k3k
tag: ""
pullPolicy: ""
imagePullSecrets: []
# extraEnv allows you to specify additional environment variables for the k3k controller deployment.
# This is useful for passing custom configuration or secrets to the controller.
# For example:
@@ -39,35 +41,16 @@ controller:
# name: my-secret
# key: my-key
extraEnv: []
# resources limits and requests allows you to set resources limits and requests for CPU and Memory
resources:
requests:
cpu: "100m"
memory: "100Mi"
limits:
cpu: ""
memory: ""
# configuration related to the agent component in k3k
agent:
imagePullSecrets: []
# configuration related to agent in shared mode
shared:
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
kubeletPortRange: "50000-51000"
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
webhookPortRange: "51001-52000"
image:
registry: ""
repository: "rancher/k3k-kubelet"
tag: ""
pullPolicy: ""
# configuration related to agent in virtual mode
virtual:
image:
registry: ""
repository: "rancher/k3s"
pullPolicy: ""
# resources allows you to set resources limits and requests for CPU and Memory
# resources:
# limits:
# cpu: "200m"
# memory: "200Mi"
# requests:
# cpu: "100m"
# memory: "100Mi"
resources: {}
# configuration related to k3s server component in k3k
server:
@@ -76,3 +59,27 @@ server:
registry:
repository: "rancher/k3s"
pullPolicy: ""
# configuration related to the agent component in k3k
agent:
imagePullSecrets: []
# configuration related to agent in shared mode
shared:
image:
registry: ""
repository: "rancher/k3k-kubelet"
tag: ""
pullPolicy: ""
# Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled
kubeletPortRange: "50000-51000"
# Specifies the port range that will be used for webhook if mirrorHostNodes is enabled
webhookPortRange: "51001-52000"
# configuration related to agent in virtual mode
virtual:
image:
registry: ""
repository: "rancher/k3s"
pullPolicy: ""

View File

@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -78,7 +78,7 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
return errors.New("invalid cluster name")
}
if config.mode == string(v1alpha1.SharedClusterMode) && config.agents != 0 {
if config.mode == string(v1beta1.SharedClusterMode) && config.agents != 0 {
return errors.New("invalid flag, --agents flag is only allowed in virtual mode")
}
@@ -114,8 +114,8 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
cluster := newCluster(name, namespace, config)
cluster.Spec.Expose = &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
cluster.Spec.Expose = &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{},
}
// add Host IP address as an extra TLS-SAN to expose the k3k cluster
@@ -169,17 +169,17 @@ func createAction(appCtx *AppContext, config *CreateConfig) func(cmd *cobra.Comm
}
}
func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster {
cluster := &v1alpha1.Cluster{
func newCluster(name, namespace string, config *CreateConfig) *v1beta1.Cluster {
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "k3k.io/v1alpha1",
APIVersion: "k3k.io/v1beta1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Servers: ptr.To(int32(config.servers)),
Agents: ptr.To(int32(config.agents)),
ClusterCIDR: config.clusterCIDR,
@@ -189,9 +189,9 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
ServerEnvs: env(config.serverEnvs),
AgentEnvs: env(config.agentEnvs),
Version: config.version,
Mode: v1alpha1.ClusterMode(config.mode),
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.PersistenceMode(config.persistenceType),
Mode: v1beta1.ClusterMode(config.mode),
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.PersistenceMode(config.persistenceType),
StorageClassName: ptr.To(config.storageClassName),
StorageRequestSize: config.storageRequestSize,
},
@@ -210,25 +210,25 @@ func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster
}
if config.customCertsPath != "" {
cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
cluster.Spec.CustomCAs = v1beta1.CustomCAs{
Enabled: true,
Sources: v1alpha1.CredentialSources{
ClientCA: v1alpha1.CredentialSource{
Sources: v1beta1.CredentialSources{
ClientCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "client-ca"),
},
ServerCA: v1alpha1.CredentialSource{
ServerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "server-ca"),
},
ETCDServerCA: v1alpha1.CredentialSource{
ETCDServerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-server-ca"),
},
ETCDPeerCA: v1alpha1.CredentialSource{
ETCDPeerCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "etcd-peer-ca"),
},
RequestHeaderCA: v1alpha1.CredentialSource{
RequestHeaderCA: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "request-header-ca"),
},
ServiceAccountToken: v1alpha1.CredentialSource{
ServiceAccountToken: v1beta1.CredentialSource{
SecretName: controller.SafeConcatNameWithPrefix(cluster.Name, "service-account-token"),
},
},
@@ -256,7 +256,7 @@ func env(envSlice []string) []v1.EnvVar {
return envVars
}
func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1alpha1.Cluster) error {
func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1beta1.Cluster) error {
interval := 5 * time.Second
timeout := 2 * time.Minute
@@ -267,12 +267,12 @@ func waitForCluster(ctx context.Context, k8sClient client.Client, cluster *v1alp
}
// If resource ready -> stop polling
if cluster.Status.Phase == v1alpha1.ClusterReady {
if cluster.Status.Phase == v1beta1.ClusterReady {
return true, nil
}
// If resource failed -> stop polling with an error
if cluster.Status.Phase == v1alpha1.ClusterFailed {
if cluster.Status.Phase == v1beta1.ClusterFailed {
return true, fmt.Errorf("cluster creation failed: %s", cluster.Status.Phase)
}

View File

@@ -6,7 +6,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
@@ -16,7 +16,7 @@ func createFlags(cmd *cobra.Command, cfg *CreateConfig) {
cmd.Flags().StringVar(&cfg.clusterCIDR, "cluster-cidr", "", "cluster CIDR")
cmd.Flags().StringVar(&cfg.serviceCIDR, "service-cidr", "", "service CIDR")
cmd.Flags().BoolVar(&cfg.mirrorHostNodes, "mirror-host-nodes", false, "Mirror Host Cluster Nodes")
cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1alpha1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1beta1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral, static)")
cmd.Flags().StringVar(&cfg.storageClassName, "storage-class-name", "", "storage class name for dynamic persistence type")
cmd.Flags().StringVar(&cfg.storageRequestSize, "storage-request-size", "", "storage size for dynamic persistence type")
cmd.Flags().StringSliceVar(&cfg.serverArgs, "server-args", []string{}, "servers extra arguments")
@@ -36,8 +36,8 @@ func validateCreateConfig(cfg *CreateConfig) error {
}
if cfg.persistenceType != "" {
switch v1alpha1.PersistenceMode(cfg.persistenceType) {
case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode:
switch v1beta1.PersistenceMode(cfg.persistenceType) {
case v1beta1.EphemeralPersistenceMode, v1beta1.DynamicPersistenceMode:
return nil
default:
return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`)
@@ -50,7 +50,7 @@ func validateCreateConfig(cfg *CreateConfig) error {
if cfg.mode != "" {
switch cfg.mode {
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
case string(v1beta1.VirtualClusterMode), string(v1beta1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)

View File

@@ -14,7 +14,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcluster "github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
@@ -50,7 +50,7 @@ func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace)
cluster := v1alpha1.Cluster{
cluster := v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
@@ -86,7 +86,7 @@ func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
}
}
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error {
func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1beta1.Cluster) error {
var secret v1.Secret
key := types.NamespacedName{

View File

@@ -10,7 +10,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewClusterListCmd(appCtx *AppContext) *cobra.Command {
@@ -32,7 +32,7 @@ func list(appCtx *AppContext) func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
client := appCtx.Client
var clusters v1alpha1.ClusterList
var clusters v1beta1.ClusterList
if err := client.List(ctx, &clusters, ctrlclient.InNamespace(appCtx.namespace)); err != nil {
return err
}

View File

@@ -18,7 +18,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -83,7 +83,7 @@ func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra
Namespace: appCtx.Namespace(cfg.name),
}
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := client.Get(ctx, clusterKey, &cluster); err != nil {
return err
@@ -128,7 +128,7 @@ func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra
}
}
func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
func writeKubeconfigFile(cluster *v1beta1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error {
if configName == "" {
configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml"
}

View File

@@ -13,7 +13,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/policy"
)
@@ -30,7 +30,7 @@ func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
Example: "k3kcli policy create [command options] NAME",
PreRunE: func(cmd *cobra.Command, args []string) error {
switch config.mode {
case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode):
case string(v1beta1.VirtualClusterMode), string(v1beta1.SharedClusterMode):
return nil
default:
return errors.New(`mode should be one of "shared" or "virtual"`)
@@ -51,7 +51,7 @@ func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateCo
client := appCtx.Client
policyName := args[0]
_, err := createPolicy(ctx, client, v1alpha1.ClusterMode(config.mode), policyName)
_, err := createPolicy(ctx, client, v1beta1.ClusterMode(config.mode), policyName)
return err
}
@@ -81,18 +81,18 @@ func createNamespace(ctx context.Context, client client.Client, name, policyName
return nil
}
func createPolicy(ctx context.Context, client client.Client, mode v1alpha1.ClusterMode, policyName string) (*v1alpha1.VirtualClusterPolicy, error) {
func createPolicy(ctx context.Context, client client.Client, mode v1beta1.ClusterMode, policyName string) (*v1beta1.VirtualClusterPolicy, error) {
logrus.Infof("Creating policy [%s]", policyName)
policy := &v1alpha1.VirtualClusterPolicy{
policy := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: policyName,
},
TypeMeta: metav1.TypeMeta{
Kind: "VirtualClusterPolicy",
APIVersion: "k3k.io/v1alpha1",
APIVersion: "k3k.io/v1beta1",
},
Spec: v1alpha1.VirtualClusterPolicySpec{
Spec: v1beta1.VirtualClusterPolicySpec{
AllowedMode: mode,
},
}

View File

@@ -8,7 +8,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewPolicyDeleteCmd(appCtx *AppContext) *cobra.Command {
@@ -27,7 +27,7 @@ func policyDeleteAction(appCtx *AppContext) func(cmd *cobra.Command, args []stri
client := appCtx.Client
name := args[0]
policy := &v1alpha1.VirtualClusterPolicy{}
policy := &v1beta1.VirtualClusterPolicy{}
policy.Name = name
if err := client.Delete(ctx, policy); err != nil {

View File

@@ -9,7 +9,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func NewPolicyListCmd(appCtx *AppContext) *cobra.Command {
@@ -27,7 +27,7 @@ func policyList(appCtx *AppContext) func(cmd *cobra.Command, args []string) erro
ctx := context.Background()
client := appCtx.Client
var policies v1alpha1.VirtualClusterPolicyList
var policies v1beta1.VirtualClusterPolicyList
if err := client.List(ctx, &policies); err != nil {
return err
}

View File

@@ -16,7 +16,7 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/buildinfo"
)
@@ -51,7 +51,7 @@ func NewRootCmd() *cobra.Command {
scheme := runtime.NewScheme()
_ = clientgoscheme.AddToScheme(scheme)
_ = v1alpha1.AddToScheme(scheme)
_ = v1beta1.AddToScheme(scheme)
_ = apiextensionsv1.AddToScheme(scheme)
ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme})

View File

@@ -25,7 +25,7 @@ func getPrinterColumnsFromCRD(crd *apiextensionsv1.CustomResourceDefinition) []a
}
for _, version := range crd.Spec.Versions {
if version.Name == "v1alpha1" {
if version.Name == "v1beta1" {
printerColumns = append(printerColumns, version.AdditionalPrinterColumns...)
break
}

View File

@@ -22,7 +22,7 @@ This example creates a "shared" mode K3k cluster with:
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: my-virtual-cluster

View File

@@ -1,10 +1,10 @@
# API Reference
## Packages
- [k3k.io/v1alpha1](#k3kiov1alpha1)
- [k3k.io/v1beta1](#k3kiov1beta1)
## k3k.io/v1alpha1
## k3k.io/v1beta1
### Resource Types
@@ -47,7 +47,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `Cluster` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[ClusterSpec](#clusterspec)_ | Spec defines the desired state of the Cluster. | \{ \} | |
@@ -65,7 +65,7 @@ ClusterList is a list of Cluster resources.
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `ClusterList` | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[Cluster](#cluster) array_ | | | |
@@ -313,7 +313,7 @@ _Appears in:_
| --- | --- | --- | --- |
| `type` _[PersistenceMode](#persistencemode)_ | Type specifies the persistence mode. | dynamic | |
| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC.<br />This field is only relevant in "dynamic" mode. | | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 1G | |
| `storageRequestSize` _string_ | StorageRequestSize is the requested size for the PVC.<br />This field is only relevant in "dynamic" mode. | 2G | |
#### PersistenceMode
@@ -447,7 +447,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `VirtualClusterPolicy` | | |
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[VirtualClusterPolicySpec](#virtualclusterpolicyspec)_ | Spec defines the desired state of the VirtualClusterPolicy. | \{ \} | |
@@ -465,7 +465,7 @@ VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `apiVersion` _string_ | `k3k.io/v1alpha1` | | |
| `apiVersion` _string_ | `k3k.io/v1beta1` | | |
| `kind` _string_ | `VirtualClusterPolicyList` | | |
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[VirtualClusterPolicy](#virtualclusterpolicy) array_ | | | |

View File

@@ -130,7 +130,7 @@ Create then the virtual cluster exposing through NodePort one of the ports that
```bash
cat <<EOF | kubectl apply -f -
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: mycluster

View File

@@ -17,7 +17,7 @@ This guide walks through the various ways to create and manage virtual clusters
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ingress
@@ -46,7 +46,7 @@ This will create a virtual cluster in `shared` mode and expose it via an ingress
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-persistent
@@ -80,7 +80,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ha
@@ -105,7 +105,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-virtual
@@ -136,7 +136,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-ephemeral
@@ -162,7 +162,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-custom-k8s
@@ -189,7 +189,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-resourced
@@ -216,7 +216,7 @@ This configures the CPU and memory limit for the virtual cluster.
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-node-placed
@@ -259,7 +259,7 @@ k3kcli cluster create \
### CRD Method
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: k3kcluster-http-proxy

View File

@@ -37,7 +37,7 @@ If you create a `VirtualClusterPolicy` without specifying any `spec` fields (e.g
```yaml
# Example of a minimal VCP (after creation with defaults)
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: my-default-policy
@@ -56,7 +56,7 @@ You can restrict the `mode` (e.g., "shared" or "virtual") in which K3k `Cluster`
**Example:** Allow only "shared" mode clusters.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: shared-only-policy
@@ -74,7 +74,7 @@ You can define resource consumption limits for bound Namespaces by specifying a
**Example:** Set CPU, memory, and pod limits.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: quota-policy
@@ -93,7 +93,7 @@ You can define default resource requests/limits and min/max constraints for cont
**Example:** Define default CPU requests/limits and min/max CPU.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: limit-policy
@@ -118,7 +118,7 @@ By default, K3k creates a `NetworkPolicy` in bound Namespaces to provide network
**Example:** Disable the default NetworkPolicy.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: no-default-netpol-policy
@@ -133,7 +133,7 @@ You can enforce Pod Security Standards (PSS) by specifying a Pod Security Admiss
**Example:** Enforce the "baseline" PSS level.
```yaml
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: baseline-psa-policy

View File

@@ -1,4 +1,4 @@
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: VirtualClusterPolicy
metadata:
name: policy-example

View File

@@ -1,4 +1,4 @@
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: example1

View File

@@ -1,4 +1,4 @@
apiVersion: k3k.io/v1alpha1
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
name: single-server

21
go.mod
View File

@@ -11,6 +11,7 @@ replace (
)
require (
github.com/go-logr/logr v1.4.2
github.com/go-logr/zapr v1.3.0
github.com/google/go-cmp v0.7.0
github.com/onsi/ginkgo/v2 v2.21.0
@@ -42,16 +43,6 @@ require (
sigs.k8s.io/controller-runtime v0.19.4
)
require (
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
)
require (
dario.cat/mergo v1.0.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
@@ -64,6 +55,7 @@ require (
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -98,13 +90,13 @@ require (
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -161,6 +153,7 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -171,13 +164,17 @@ require (
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
@@ -218,7 +215,7 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/klog/v2 v2.130.1
k8s.io/kms v0.31.4 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
oras.land/oras-go v1.2.5 // indirect

View File

@@ -16,7 +16,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -57,7 +57,7 @@ func AddConfigMapSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, c
}
func (c *ConfigMapSyncer) filterResources(object client.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -86,7 +86,7 @@ func (c *ConfigMapSyncer) Reconcile(ctx context.Context, req reconcile.Request)
log := ctrl.LoggerFrom(ctx).WithValues("cluster", c.ClusterName, "clusterNamespace", c.ClusterName)
ctx = ctrl.LoggerInto(ctx, log)
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := c.HostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err

View File

@@ -12,7 +12,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -21,7 +21,7 @@ import (
var ConfigMapTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -35,14 +35,14 @@ var ConfigMapTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
ConfigMaps: v1alpha1.ConfigMapSyncConfig{
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
ConfigMaps: v1beta1.ConfigMapSyncConfig{
Enabled: true,
},
},

View File

@@ -16,7 +16,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -53,7 +53,7 @@ func AddIngressSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clu
}
func (r *IngressReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -85,7 +85,7 @@ func (r *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request
var (
virtIngress networkingv1.Ingress
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {

View File

@@ -14,7 +14,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -23,7 +23,7 @@ import (
var IngressTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -37,14 +37,14 @@ var IngressTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
Ingresses: v1alpha1.IngressSyncConfig{
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
Ingresses: v1beta1.IngressSyncConfig{
Enabled: true,
},
},

View File

@@ -16,7 +16,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -53,7 +53,7 @@ func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, cluster
}
func (r *PVCReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -83,7 +83,7 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
var (
virtPVC v1.PersistentVolumeClaim
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {

View File

@@ -13,7 +13,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -22,7 +22,7 @@ import (
var PVCTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -36,14 +36,14 @@ var PVCTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
PersistentVolumeClaims: v1alpha1.PersistentVolumeClaimSyncConfig{
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
PersistentVolumeClaims: v1beta1.PersistentVolumeClaimSyncConfig{
Enabled: true,
},
},

View File

@@ -15,7 +15,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -50,7 +50,7 @@ func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager,
}
func (r *PodReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -71,7 +71,7 @@ func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
var (
virtPod v1.Pod
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {

View File

@@ -13,7 +13,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -22,7 +22,7 @@ import (
var PriorityClassTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -36,14 +36,14 @@ var PriorityClassTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
PriorityClasses: v1alpha1.PriorityClassSyncConfig{
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
PriorityClasses: v1beta1.PriorityClassSyncConfig{
Enabled: true,
},
},

View File

@@ -18,7 +18,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -74,7 +74,7 @@ var ignoreSystemPrefixPredicate = predicate.Funcs{
}
func (r *PriorityClassSyncer) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -104,7 +104,7 @@ func (r *PriorityClassSyncer) Reconcile(ctx context.Context, req reconcile.Reque
var (
priorityClass schedulingv1.PriorityClass
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {

View File

@@ -16,7 +16,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -57,7 +57,7 @@ func AddSecretSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clus
}
func (r *SecretSyncer) filterResources(object client.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()
@@ -86,7 +86,7 @@ func (s *SecretSyncer) Reconcile(ctx context.Context, req reconcile.Request) (re
log := ctrl.LoggerFrom(ctx).WithValues("cluster", s.ClusterName, "clusterNamespace", s.ClusterName)
ctx = ctrl.LoggerInto(ctx, log)
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := s.HostClient.Get(ctx, types.NamespacedName{Name: s.ClusterName, Namespace: s.ClusterNamespace}, &cluster); err != nil {
return reconcile.Result{}, err

View File

@@ -12,7 +12,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -21,7 +21,7 @@ import (
var SecretTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -35,14 +35,14 @@ var SecretTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
Secrets: v1alpha1.SecretSyncConfig{
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
Secrets: v1beta1.SecretSyncConfig{
Enabled: true,
},
},

View File

@@ -16,7 +16,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -63,7 +63,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
var (
virtService v1.Service
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
@@ -120,7 +120,7 @@ func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
func (r *ServiceReconciler) filterResources(object ctrlruntimeclient.Object) bool {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
ctx := context.Background()

View File

@@ -13,7 +13,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -22,7 +22,7 @@ import (
var ServiceTests = func() {
var (
namespace string
cluster v1alpha1.Cluster
cluster v1beta1.Cluster
)
BeforeEach(func() {
@@ -36,14 +36,14 @@ var ServiceTests = func() {
namespace = ns.Name
cluster = v1alpha1.Cluster{
cluster = v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Sync: &v1alpha1.SyncConfig{
Services: v1alpha1.ServiceSyncConfig{
Spec: v1beta1.ClusterSpec{
Sync: &v1beta1.SyncConfig{
Services: v1beta1.ServiceSyncConfig{
Enabled: true,
},
},

View File

@@ -20,7 +20,7 @@ import (
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -119,7 +119,7 @@ func buildScheme() *runtime.Scheme {
err := clientgoscheme.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
@@ -174,7 +174,7 @@ var _ = Describe("Kubelet Controller", func() {
Describe("PersistentVolumeClaim Syncer", PVCTests)
})
func translateName(cluster v1alpha1.Cluster, namespace, name string) string {
func translateName(cluster v1beta1.Cluster, namespace, name string) string {
translator := translate.ToHostTranslator{
ClusterName: cluster.Name,
ClusterNamespace: cluster.Namespace,

View File

@@ -7,6 +7,7 @@ import (
"strconv"
"strings"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
@@ -20,11 +21,10 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/log"
)
const (
webhookName = "podmutator.k3k.io"
webhookName = "podmutating.k3k.io"
webhookTimeout = int32(10)
webhookPath = "/mutate--v1-pod"
FieldpathField = "k3k.io/fieldpath"
@@ -36,14 +36,14 @@ type webhookHandler struct {
serviceName string
clusterName string
clusterNamespace string
logger *log.Logger
logger logr.Logger
webhookPort int
}
// AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to
// AddPodMutatingWebhook will add a mutating webhook to the virtual cluster to
// modify the nodeName of the created pods with the name of the virtual kubelet node name
// as well as remove any status fields of the downward apis env fields
func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger, webhookPort int) error {
func AddPodMutatingWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger logr.Logger, webhookPort int) error {
handler := webhookHandler{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
@@ -54,7 +54,7 @@ func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient c
webhookPort: webhookPort,
}
// create mutator webhook configuration to the cluster
// create mutating webhook configuration to the cluster
config, err := handler.configuration(ctx, hostClient)
if err != nil {
return err
@@ -75,7 +75,7 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
return fmt.Errorf("invalid request: object was type %t not cluster", obj)
}
w.logger.Infow("mutator webhook request", "Pod", pod.Name, "Namespace", pod.Namespace)
w.logger.Info("mutating webhook request", "pod", pod.Name, "namespace", pod.Namespace)
// look for status.* fields in the env
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
@@ -100,7 +100,7 @@ func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error
}
func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
w.logger.Infow("extracting webhook tls from host cluster")
w.logger.Info("extracting webhook tls from host cluster")
var webhookTLSSecret v1.Secret

View File

@@ -11,11 +11,11 @@ import (
"os"
"time"
"github.com/go-logr/zapr"
"github.com/go-logr/logr"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/log/klogv2"
"github.com/virtual-kubelet/virtual-kubelet/node"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
@@ -23,6 +23,7 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook"
@@ -38,26 +39,22 @@ import (
"github.com/rancher/k3k/k3k-kubelet/controller/syncer"
k3kwebhook "github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
k3klog "github.com/rancher/k3k/pkg/log"
)
var (
baseScheme = runtime.NewScheme()
k3kKubeletName = "k3k-kubelet"
)
var baseScheme = runtime.NewScheme()
func init() {
_ = clientgoscheme.AddToScheme(baseScheme)
_ = v1alpha1.AddToScheme(baseScheme)
_ = v1beta1.AddToScheme(baseScheme)
}
type kubelet struct {
virtualCluster v1alpha1.Cluster
virtualCluster v1beta1.Cluster
name string
port int
@@ -70,11 +67,11 @@ type kubelet struct {
hostMgr manager.Manager
virtualMgr manager.Manager
node *nodeutil.Node
logger *k3klog.Logger
logger logr.Logger
token string
}
func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet, error) {
func newKubelet(ctx context.Context, c *config, logger logr.Logger) (*kubelet, error) {
hostConfig, err := clientcmd.BuildConfigFromFlags("", c.HostKubeconfig)
if err != nil {
return nil, err
@@ -97,7 +94,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, err
}
ctrl.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
ctrl.SetLogger(logger)
hostMetricsBindAddress := ":8083"
virtualMetricsBindAddress := ":8084"
@@ -150,10 +147,10 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, errors.New("unable to create controller-runtime mgr for virtual cluster: " + err.Error())
}
logger.Info("adding pod mutator webhook")
logger.Info("adding pod mutating webhook")
if err := k3kwebhook.AddPodMutatorWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
return nil, errors.New("unable to add pod mutator webhook for virtual cluster: " + err.Error())
if err := k3kwebhook.AddPodMutatingWebhook(ctx, virtualMgr, hostClient, c.ClusterName, c.ClusterNamespace, c.ServiceName, logger, c.WebhookPort); err != nil {
return nil, errors.New("unable to add pod mutating webhook for virtual cluster: " + err.Error())
}
if err := addControllers(ctx, hostMgr, virtualMgr, c, hostClient); err != nil {
@@ -173,7 +170,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
return nil, errors.New("failed to get the DNS service for the cluster: " + err.Error())
}
var virtualCluster v1alpha1.Cluster
var virtualCluster v1beta1.Cluster
if err := hostClient.Get(ctx, types.NamespacedName{Name: c.ClusterName, Namespace: c.ClusterNamespace}, &virtualCluster); err != nil {
return nil, errors.New("failed to get virtualCluster spec: " + err.Error())
}
@@ -189,7 +186,7 @@ func newKubelet(ctx context.Context, c *config, logger *k3klog.Logger) (*kubelet
hostMgr: hostMgr,
virtualMgr: virtualMgr,
agentIP: clusterIP,
logger: logger.Named(k3kKubeletName),
logger: logger,
token: c.Token,
dnsIP: dnsService.Spec.ClusterIP,
port: c.KubeletPort,
@@ -211,9 +208,9 @@ func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostCl
return service.Spec.ClusterIP, nil
}
func (k *kubelet) registerNode(ctx context.Context, agentIP string, cfg config) error {
func (k *kubelet) registerNode(agentIP string, cfg config) error {
providerFunc := k.newProviderFunc(cfg)
nodeOpts := k.nodeOpts(ctx, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
nodeOpts := k.nodeOpts(cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, agentIP)
var err error
@@ -231,34 +228,36 @@ func (k *kubelet) start(ctx context.Context) {
go func() {
err := k.hostMgr.Start(ctx)
if err != nil {
k.logger.Fatalw("host manager stopped", zap.Error(err))
k.logger.Error(err, "host manager stopped")
}
}()
go func() {
err := k.virtualMgr.Start(ctx)
if err != nil {
k.logger.Fatalw("virtual manager stopped", zap.Error(err))
k.logger.Error(err, "virtual manager stopped")
}
}()
// run the node async so that we can wait for it to be ready in another call
go func() {
ctx = log.WithLogger(ctx, k.logger)
klog.SetLogger(k.logger)
ctx = log.WithLogger(ctx, klogv2.New(nil))
if err := k.node.Run(ctx); err != nil {
k.logger.Fatalw("node errored when running", zap.Error(err))
k.logger.Error(err, "node errored when running")
}
}()
if err := k.node.WaitReady(context.Background(), time.Minute*1); err != nil {
k.logger.Fatalw("node was not ready within timeout of 1 minute", zap.Error(err))
k.logger.Error(err, "node was not ready within timeout of 1 minute")
}
<-k.node.Done()
if err := k.node.Err(); err != nil {
k.logger.Fatalw("node stopped with an error", zap.Error(err))
k.logger.Error(err, "node stopped with an error")
}
k.logger.Info("node exited successfully")
@@ -277,7 +276,7 @@ func (k *kubelet) newProviderFunc(cfg config) nodeutil.NewProviderFunc {
}
}
func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
func (k *kubelet) nodeOpts(srvPort int, namespace, name, hostname, agentIP string) nodeutil.NodeOpt {
return func(c *nodeutil.NodeConfig) error {
c.HTTPListenAddr = fmt.Sprintf(":%d", srvPort)
// set up the routes
@@ -288,7 +287,7 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, ho
c.Handler = mux
tlsConfig, err := loadTLSConfig(ctx, k.hostClient, name, namespace, k.name, hostname, k.token, agentIP)
tlsConfig, err := loadTLSConfig(name, namespace, k.name, hostname, k.token, agentIP)
if err != nil {
return errors.New("unable to get tls config: " + err.Error())
}
@@ -299,12 +298,12 @@ func (k *kubelet) nodeOpts(ctx context.Context, srvPort int, namespace, name, ho
}
}
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger *k3klog.Logger) (*rest.Config, error) {
func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, token string, logger logr.Logger) (*rest.Config, error) {
if virtualConfigPath != "" {
return clientcmd.BuildConfigFromFlags("", virtualConfigPath)
}
// virtual kubeconfig file is empty, trying to fetch the k3k cluster kubeconfig
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := hostClient.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, &cluster); err != nil {
return nil, err
}
@@ -318,7 +317,7 @@ func virtRestConfig(ctx context.Context, virtualConfigPath string, hostClient ct
}, func() error {
var err error
b, err = bootstrap.DecodedBootstrap(token, endpoint)
logger.Infow("decoded bootstrap", zap.Error(err))
logger.Error(err, "decoded bootstrap")
return err
}); err != nil {
return nil, errors.New("unable to decode bootstrap: " + err.Error())
@@ -369,17 +368,10 @@ func kubeconfigBytes(url string, serverCA, clientCert, clientKey []byte) ([]byte
return clientcmd.Write(*config)
}
func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
var (
cluster v1alpha1.Cluster
b *bootstrap.ControlRuntimeBootstrap
)
func loadTLSConfig(clusterName, clusterNamespace, nodeName, hostname, token, agentIP string) (*tls.Config, error) {
var b *bootstrap.ControlRuntimeBootstrap
if err := hostClient.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: clusterNamespace}, &cluster); err != nil {
return nil, err
}
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(cluster.Name), cluster.Namespace)
endpoint := fmt.Sprintf("%s.%s", server.ServiceName(clusterName), clusterNamespace)
if err := retry.OnError(controller.Backoff, func(err error) bool {
return err != nil
@@ -429,7 +421,7 @@ func loadTLSConfig(ctx context.Context, hostClient ctrlruntimeclient.Client, clu
}
func addControllers(ctx context.Context, hostMgr, virtualMgr manager.Manager, c *config, hostClient ctrlruntimeclient.Client) error {
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
objKey := types.NamespacedName{
Namespace: c.ClusterNamespace,

View File

@@ -7,12 +7,12 @@ import (
"os"
"strings"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"go.uber.org/zap"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
@@ -22,8 +22,9 @@ import (
var (
configFile string
cfg config
logger *log.Logger
logger logr.Logger
debug bool
logFormat string
)
func main() {
@@ -34,13 +35,16 @@ func main() {
if err := InitializeConfig(cmd); err != nil {
return err
}
logger = log.New(debug)
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
logger = zapr.NewLogger(log.New(debug, logFormat))
ctrlruntimelog.SetLogger(logger)
return nil
},
RunE: run,
}
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "json", "Log format (json or console)")
rootCmd.PersistentFlags().StringVar(&cfg.ClusterName, "cluster-name", "", "Name of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.ClusterNamespace, "cluster-namespace", "", "Namespace of the k3k cluster")
rootCmd.PersistentFlags().StringVar(&cfg.Token, "token", "", "K3S token of the k3k cluster")
@@ -53,7 +57,6 @@ func main() {
rootCmd.PersistentFlags().StringVar(&cfg.ServerIP, "server-ip", "", "Server IP used for registering the virtual kubelet to the cluster")
rootCmd.PersistentFlags().StringVar(&cfg.Version, "version", "", "Version of kubernetes server")
rootCmd.PersistentFlags().StringVar(&configFile, "config", "/opt/rancher/k3k/config.yaml", "Path to k3k-kubelet config file")
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Enable debug logging")
rootCmd.PersistentFlags().BoolVar(&cfg.MirrorHostNodes, "mirror-host-nodes", false, "Mirror real node objects from host cluster")
if err := rootCmd.Execute(); err != nil {
@@ -73,7 +76,7 @@ func run(cmd *cobra.Command, args []string) error {
return fmt.Errorf("failed to create new virtual kubelet instance: %w", err)
}
if err := k.registerNode(ctx, k.agentIP, cfg); err != nil {
if err := k.registerNode(k.agentIP, cfg); err != nil {
return fmt.Errorf("failed to register new node: %w", err)
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -12,16 +13,15 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3klog "github.com/rancher/k3k/pkg/log"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func ConfigureNode(logger *k3klog.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string, mirrorHostNodes bool) {
func ConfigureNode(logger logr.Logger, node *corev1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1beta1.Cluster, version string, mirrorHostNodes bool) {
ctx := context.Background()
if mirrorHostNodes {
hostNode, err := coreClient.Nodes().Get(ctx, node.Name, metav1.GetOptions{})
if err != nil {
logger.Fatal("error getting host node for mirroring", err)
logger.Error(err, "error getting host node for mirroring", err)
}
node.Spec = *hostNode.Spec.DeepCopy()
@@ -56,7 +56,7 @@ func ConfigureNode(logger *k3klog.Logger, node *corev1.Node, hostname string, se
go func() {
for range ticker.C {
if err := updateNodeCapacity(ctx, coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil {
logger.Error("error updating node capacity", err)
logger.Error(err, "error updating node capacity")
}
}
}()

View File

@@ -12,6 +12,7 @@ import (
"strings"
"time"
"github.com/go-logr/logr"
"github.com/google/go-cmp/cmp"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
@@ -31,6 +32,7 @@ import (
dto "github.com/prometheus/client_model/go"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cv1 "k8s.io/client-go/kubernetes/typed/core/v1"
compbasemetrics "k8s.io/component-base/metrics"
@@ -39,9 +41,8 @@ import (
"github.com/rancher/k3k/k3k-kubelet/controller/webhook"
"github.com/rancher/k3k/k3k-kubelet/provider/collectors"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
k3klog "github.com/rancher/k3k/pkg/log"
)
// check at compile time if the Provider implements the nodeutil.Provider interface
@@ -60,12 +61,12 @@ type Provider struct {
ClusterName string
serverIP string
dnsIP string
logger *k3klog.Logger
logger logr.Logger
}
var ErrRetryTimeout = errors.New("provider timed out")
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger *k3klog.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger logr.Logger, namespace, name, serverIP, dnsIP string) (*Provider, error) {
coreClient, err := cv1.NewForConfig(&hostConfig)
if err != nil {
return nil, err
@@ -124,7 +125,7 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, con
}
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
p.logger.Infof("got error %s when getting logs for %s in %s", err, hostPodName, p.ClusterNamespace)
p.logger.Error(err, fmt.Sprintf("got error when getting logs for %s in %s", hostPodName, p.ClusterNamespace))
return closer, err
}
@@ -198,7 +199,7 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
// GetStatsSummary gets the stats for the node, including running pods
func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
p.logger.Debug("GetStatsSummary")
p.logger.V(1).Info("GetStatsSummary")
nodeList := &corev1.NodeList{}
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
@@ -346,7 +347,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
Name: p.ClusterName,
}
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
@@ -402,7 +403,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
// inject networking information to the pod including the virtual cluster controlplane endpoint
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
p.logger.Infow("creating pod",
p.logger.Info("creating pod",
"host_namespace", tPod.Namespace, "host_name", tPod.Name,
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
)
@@ -488,7 +489,7 @@ func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
}
func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.Debugw("got a request for update pod")
p.logger.V(1).Info("got a request for update pod")
// Once scheduled a Pod cannot update other fields than the image of the containers, initcontainers and a few others
// See: https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement
@@ -518,7 +519,7 @@ func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
currentHostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, currentHostPod.Name, &currentHostPod, metav1.UpdateOptions{}); err != nil {
p.logger.Errorf("error when updating ephemeral containers: %v", err)
p.logger.Error(err, "error when updating ephemeral containers")
return err
}
@@ -589,15 +590,20 @@ func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
// expected to call the NotifyPods callback with a terminal pod status where all the containers are in a terminal
// state, as well as the pod. DeletePod may be called multiple times for the same pod.
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
p.logger.Infof("Got request to delete pod %s", pod.Name)
p.logger.Info(fmt.Sprintf("got request to delete pod %s/%s", pod.Namespace, pod.Name))
hostName := p.Translator.TranslateName(pod.Namespace, pod.Name)
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
p.logger.Info(fmt.Sprintf("pod %s/%s already deleted from host cluster", p.ClusterNamespace, hostName))
return nil
}
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
p.logger.Infof("Deleted pod %s", pod.Name)
p.logger.Info(fmt.Sprintf("pod %s/%s deleted from host cluster", p.ClusterNamespace, hostName))
return nil
}
@@ -607,7 +613,7 @@ func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error) {
p.logger.Debugw("got a request for get pod", "Namespace", namespace, "Name", name)
p.logger.V(1).Info("got a request for get pod", "namespace", namespace, "name", name)
hostNamespaceName := types.NamespacedName{
Namespace: p.ClusterNamespace,
Name: p.Translator.TranslateName(namespace, name),
@@ -629,14 +635,14 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
// concurrently outside of the calling goroutine. Therefore it is recommended
// to return a version after DeepCopy.
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
p.logger.Debugw("got a request for pod status", "Namespace", namespace, "Name", name)
p.logger.V(1).Info("got a request for pod status", "namespace", namespace, "name", name)
pod, err := p.GetPod(ctx, namespace, name)
if err != nil {
return nil, fmt.Errorf("unable to get pod for status: %w", err)
}
p.logger.Debugw("got pod status", "Namespace", namespace, "Name", name, "Status", pod.Status)
p.logger.V(1).Info("got pod status", "namespace", namespace, "name", name, "status", pod.Status)
return pod.Status.DeepCopy(), nil
}
@@ -760,7 +766,7 @@ func mergeEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
return orig
}
// configureFieldPathEnv will retrieve all annotations created by the pod mutator webhook
// configureFieldPathEnv will retrieve all annotations created by the pod mutating webhook
// to assign env fieldpaths to pods, it will also make sure to change the metadata.name and metadata.namespace to the
// assigned annotations
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {

View File

@@ -23,7 +23,7 @@ const (
// transformTokens copies the serviceaccount tokens used by pod's serviceaccount to a secret on the host cluster and mount it
// to look like the serviceaccount token
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
p.logger.Infow("transforming token", "Pod", pod.Name, "Namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
p.logger.Info("transforming token", "pod", pod.Name, "namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
// skip this process if the kube-api-access is already removed from the pod
// this is needed in case users already adds their own custom tokens like in rancher imported clusters

View File

@@ -4,8 +4,10 @@ import (
"encoding/hex"
"strings"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -34,6 +36,13 @@ type ToHostTranslator struct {
ClusterNamespace string
}
func NewHostTranslator(cluster *v1beta1.Cluster) *ToHostTranslator {
return &ToHostTranslator{
ClusterName: cluster.Name,
ClusterNamespace: cluster.Namespace,
}
}
// Translate translates a virtual cluster object to a host cluster object. This should only be used for
// static resources such as configmaps/secrets, and not for things like pods (which can reference other
// objects). Note that this won't set host-cluster values (like resource version) so when updating you
@@ -125,3 +134,11 @@ func (t *ToHostTranslator) TranslateName(namespace string, name string) string {
return controller.SafeConcatName(namePrefix, nameSuffix)
}
// NamespacedName returns the types.NamespacedName of the resource in the host cluster
func (t *ToHostTranslator) NamespacedName(obj client.Object) types.NamespacedName {
return types.NamespacedName{
Namespace: t.ClusterNamespace,
Name: t.TranslateName(obj.GetNamespace(), obj.GetName()),
}
}

47
main.go
View File

@@ -9,9 +9,9 @@ import (
"os/signal"
"syscall"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/spf13/cobra"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/manager"
@@ -22,7 +22,7 @@ import (
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
@@ -38,12 +38,13 @@ var (
webhookPortRange string
maxConcurrentReconciles int
debug bool
logger *log.Logger
logFormat string
logger logr.Logger
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = v1alpha1.AddToScheme(scheme)
_ = v1beta1.AddToScheme(scheme)
}
func main() {
@@ -56,19 +57,20 @@ func main() {
},
PersistentPreRun: func(cmd *cobra.Command, args []string) {
cmds.InitializeConfig(cmd)
logger = log.New(debug)
logger = zapr.NewLogger(log.New(debug, logFormat))
},
RunE: run,
}
rootCmd.PersistentFlags().BoolVar(&debug, "debug", false, "Debug level logging")
rootCmd.PersistentFlags().StringVar(&logFormat, "log-format", "json", "Log format (json or console)")
rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig path")
rootCmd.PersistentFlags().StringVar(&config.ClusterCIDR, "cluster-cidr", "", "Cluster CIDR to be added to the networkpolicy")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImage, "shared-agent-image", "rancher/k3k-kubelet", "K3K Virtual Kubelet image")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImagePullPolicy, "shared-agent-image-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImage, "agent-shared-image", "rancher/k3k-kubelet", "K3K Virtual Kubelet image")
rootCmd.PersistentFlags().StringVar(&config.SharedAgentImagePullPolicy, "agent-shared-image-pull-policy", "", "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImage, "agent-virtual-image", "rancher/k3s", "K3S Virtual Agent image")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImagePullPolicy, "agent-virtual-image-pull-policy", "", "K3S Virtual Agent image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&kubeletPortRange, "kubelet-port-range", "50000-51000", "Port Range for k3k kubelet in shared mode")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImage, "virtual-agent-image", "rancher/k3s", "K3S Virtual Agent image")
rootCmd.PersistentFlags().StringVar(&config.VirtualAgentImagePullPolicy, "virtual-agent-image-pull-policy", "", "K3S Virtual Agent image pull policy must be one of Always, IfNotPresent or Never")
rootCmd.PersistentFlags().StringVar(&webhookPortRange, "webhook-port-range", "51001-52000", "Port Range for k3k kubelet webhook in shared mode")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImage, "k3s-server-image", "rancher/k3s", "K3K server image")
rootCmd.PersistentFlags().StringVar(&config.K3SServerImagePullPolicy, "k3s-server-image-pull-policy", "", "K3K server image pull policy")
@@ -77,7 +79,7 @@ func main() {
rootCmd.PersistentFlags().IntVar(&maxConcurrentReconciles, "max-concurrent-reconciles", 50, "maximum number of concurrent reconciles")
if err := rootCmd.Execute(); err != nil {
logger.Fatalw("failed to run k3k controller", zap.Error(err))
logger.Error(err, "failed to run k3k controller")
}
}
@@ -86,6 +88,7 @@ func run(cmd *cobra.Command, args []string) error {
defer stop()
logger.Info("Starting k3k - Version: " + buildinfo.Version)
ctrlruntimelog.SetLogger(logger)
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
@@ -99,8 +102,6 @@ func run(cmd *cobra.Command, args []string) error {
return fmt.Errorf("failed to create new controller runtime manager: %v", err)
}
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
logger.Info("adding cluster controller")
portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient())
@@ -114,23 +115,35 @@ func run(cmd *cobra.Command, args []string) error {
}
if err := cluster.Add(ctx, mgr, &config, maxConcurrentReconciles, portAllocator, nil); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
return fmt.Errorf("failed to add cluster controller: %v", err)
}
logger.Info("adding etcd pod controller")
logger.Info("adding statefulset controller")
if err := cluster.AddStatefulSetController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add statefulset controller: %v", err)
}
logger.Info("adding service controller")
if err := cluster.AddServiceController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add service controller: %v", err)
}
logger.Info("adding pod controller")
if err := cluster.AddPodController(ctx, mgr, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
return fmt.Errorf("failed to add pod controller: %v", err)
}
logger.Info("adding clusterpolicy controller")
if err := policy.Add(mgr, config.ClusterCIDR, maxConcurrentReconciles); err != nil {
return fmt.Errorf("failed to add the clusterpolicy controller: %v", err)
return fmt.Errorf("failed to add clusterpolicy controller: %v", err)
}
if err := mgr.Start(ctx); err != nil {
return fmt.Errorf("failed to start the manager: %v", err)
return fmt.Errorf("failed to start manager: %v", err)
}
logger.Info("controller manager stopped")

View File

@@ -1,3 +1,3 @@
// +k8s:deepcopy-gen=package
// +groupName=k3k.io
package v1alpha1
package v1beta1

View File

@@ -1,4 +1,4 @@
package v1alpha1
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
@@ -10,7 +10,7 @@ import (
)
var (
SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1alpha1"}
SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1beta1"}
SchemBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemBuilder.AddToScheme
)

View File

@@ -1,4 +1,4 @@
package v1alpha1
package v1beta1
import (
v1 "k8s.io/api/core/v1"
@@ -343,7 +343,7 @@ type PersistenceConfig struct {
// StorageRequestSize is the requested size for the PVC.
// This field is only relevant in "dynamic" mode.
//
// +kubebuilder:default="1G"
// +kubebuilder:default="2G"
// +optional
StorageRequestSize string `json:"storageRequestSize,omitempty"`
}

View File

@@ -2,7 +2,7 @@
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
package v1beta1
import (
"k8s.io/api/core/v1"

View File

@@ -11,7 +11,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -24,12 +24,12 @@ type ResourceEnsurer interface {
}
type Config struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
client ctrlruntimeclient.Client
scheme *runtime.Scheme
}
func NewConfig(cluster *v1alpha1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config {
func NewConfig(cluster *v1beta1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config {
return &Config{
cluster: cluster,
client: client,

View File

@@ -19,7 +19,7 @@ import (
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
)
@@ -99,7 +99,7 @@ func (s *SharedAgent) config(ctx context.Context) error {
return s.ensureObject(ctx, configSecret)
}
func sharedAgentData(cluster *v1alpha1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
func sharedAgentData(cluster *v1beta1.Cluster, serviceName, token, ip string, kubeletPort, webhookPort int) string {
version := cluster.Spec.Version
if cluster.Spec.Version == "" {
version = cluster.Status.HostVersion

View File

@@ -8,12 +8,12 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func Test_sharedAgentData(t *testing.T) {
type args struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
serviceName string
ip string
kubeletPort int
@@ -29,12 +29,12 @@ func Test_sharedAgentData(t *testing.T) {
{
name: "simple config",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Version: "v1.2.3",
},
},
@@ -59,15 +59,15 @@ func Test_sharedAgentData(t *testing.T) {
{
name: "version in status",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Version: "v1.2.3",
},
Status: v1alpha1.ClusterStatus{
Status: v1beta1.ClusterStatus{
HostVersion: "v1.3.3",
},
},
@@ -92,12 +92,12 @@ func Test_sharedAgentData(t *testing.T) {
{
name: "missing version in spec",
args: args{
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Status: v1alpha1.ClusterStatus{
Status: v1beta1.ClusterStatus{
HostVersion: "v1.3.3",
},
},

View File

@@ -0,0 +1,35 @@
package cluster
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
v1 "k8s.io/api/core/v1"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/controller"
)
// newVirtualClient creates a new Client that can be used to interact with the virtual cluster
func newVirtualClient(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) (ctrlruntimeclient.Client, error) {
var clusterKubeConfig v1.Secret
kubeconfigSecretName := types.NamespacedName{
Name: controller.SafeConcatNameWithPrefix(clusterName, "kubeconfig"),
Namespace: clusterNamespace,
}
if err := hostClient.Get(ctx, kubeconfigSecretName, &clusterKubeConfig); err != nil {
return nil, fmt.Errorf("failed to get kubeconfig secret: %w", err)
}
restConfig, err := clientcmd.RESTConfigFromKubeConfig(clusterKubeConfig.Data["kubeconfig.yaml"])
if err != nil {
return nil, fmt.Errorf("failed to create config from kubeconfig file: %w", err)
}
return ctrlruntimeclient.New(restConfig, ctrlruntimeclient.Options{})
}

View File

@@ -33,7 +33,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/server"
@@ -46,7 +46,6 @@ const (
namePrefix = "k3k"
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
ClusterInvalidName = "system"
defaultVirtualClusterCIDR = "10.52.0.0/16"
@@ -118,7 +117,7 @@ func Add(ctx context.Context, mgr manager.Manager, config *Config, maxConcurrent
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.Cluster{}).
For(&v1beta1.Cluster{}).
Watches(&v1.Namespace{}, namespaceEventHandler(&reconciler)).
Owns(&apps.StatefulSet{}).
Owns(&v1.Service{}).
@@ -149,7 +148,7 @@ func namespaceEventHandler(r *ClusterReconciler) handler.Funcs {
}
// Enqueue all the Cluster in the namespace
var clusterList v1alpha1.ClusterList
var clusterList v1beta1.ClusterList
if err := r.Client.List(ctx, &clusterList, client.InNamespace(oldNs.Name)); err != nil {
return
}
@@ -167,7 +166,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
log.Info("reconciling cluster")
var cluster v1alpha1.Cluster
var cluster v1beta1.Cluster
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
@@ -178,8 +177,8 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
// Set initial status if not already set
if cluster.Status.Phase == "" || cluster.Status.Phase == v1alpha1.ClusterUnknown {
cluster.Status.Phase = v1alpha1.ClusterProvisioning
if cluster.Status.Phase == "" || cluster.Status.Phase == v1beta1.ClusterUnknown {
cluster.Status.Phase = v1beta1.ClusterProvisioning
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -233,14 +232,14 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
return reconcile.Result{}, nil
}
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) reconcileCluster(ctx context.Context, cluster *v1beta1.Cluster) error {
err := c.reconcile(ctx, cluster)
c.updateStatus(cluster, err)
return err
}
func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
var ns v1.Namespace
@@ -252,7 +251,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
cluster.Status.PolicyName = policyName
if found && policyName != "" {
var policy v1alpha1.VirtualClusterPolicy
var policy v1beta1.VirtualClusterPolicy
if err := c.Client.Get(ctx, client.ObjectKey{Name: policyName}, &policy); err != nil {
return err
}
@@ -287,7 +286,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
cluster.Status.ClusterCIDR = cluster.Spec.ClusterCIDR
if cluster.Status.ClusterCIDR == "" {
cluster.Status.ClusterCIDR = defaultVirtualClusterCIDR
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
cluster.Status.ClusterCIDR = defaultSharedClusterCIDR
}
}
@@ -295,7 +294,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
cluster.Status.ServiceCIDR = cluster.Spec.ServiceCIDR
if cluster.Status.ServiceCIDR == "" {
// in shared mode try to lookup the serviceCIDR
if cluster.Spec.Mode == v1alpha1.SharedClusterMode {
if cluster.Spec.Mode == v1beta1.SharedClusterMode {
log.Info("looking up Service CIDR for shared mode")
cluster.Status.ServiceCIDR, err = c.lookupServiceCIDR(ctx)
@@ -307,7 +306,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
}
// in virtual mode assign a default serviceCIDR
if cluster.Spec.Mode == v1alpha1.VirtualClusterMode {
if cluster.Spec.Mode == v1beta1.VirtualClusterMode {
log.Info("assign default service CIDR for virtual mode")
cluster.Status.ServiceCIDR = defaultVirtualServiceCIDR
@@ -353,7 +352,7 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1alpha1.Clu
}
// ensureBootstrapSecret will create or update the Secret containing the bootstrap data from the k3s server
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring bootstrap secret")
@@ -385,7 +384,7 @@ func (c *ClusterReconciler) ensureBootstrapSecret(ctx context.Context, cluster *
}
// ensureKubeconfigSecret will create or update the Secret containing the kubeconfig data from the k3s server
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP string, port int) error {
func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster *v1beta1.Cluster, serviceIP string, port int) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring kubeconfig secret")
@@ -423,7 +422,7 @@ func (c *ClusterReconciler) ensureKubeconfigSecret(ctx context.Context, cluster
return err
}
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server, serviceIP string) error {
func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server, serviceIP string) error {
// create init node config
initServerConfig, err := server.Config(true, serviceIP)
if err != nil {
@@ -459,7 +458,7 @@ func (c *ClusterReconciler) createClusterConfigs(ctx context.Context, cluster *v
return nil
}
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring network policy")
@@ -551,7 +550,7 @@ func (c *ClusterReconciler) ensureNetworkPolicy(ctx context.Context, cluster *v1
return nil
}
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1alpha1.Cluster) (*v1.Service, error) {
func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v1beta1.Cluster) (*v1.Service, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster service")
@@ -579,7 +578,7 @@ func (c *ClusterReconciler) ensureClusterService(ctx context.Context, cluster *v
return currentService, nil
}
func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1beta1.Cluster) error {
log := ctrl.LoggerFrom(ctx)
log.Info("ensuring cluster ingress")
@@ -615,7 +614,7 @@ func (c *ClusterReconciler) ensureIngress(ctx context.Context, cluster *v1alpha1
return nil
}
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) error {
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1beta1.Cluster, server *server.Server) error {
log := ctrl.LoggerFrom(ctx)
// create headless service for the statefulset
@@ -635,6 +634,9 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
return err
}
// Add the finalizer to the StatefulSet so the statefulset controller can handle cleanup.
controllerutil.AddFinalizer(expectedServerStatefulSet, etcdPodFinalizerName)
currentServerStatefulSet := expectedServerStatefulSet.DeepCopy()
result, err := controllerutil.CreateOrUpdate(ctx, c.Client, currentServerStatefulSet, func() error {
if err := controllerutil.SetControllerReference(cluster, currentServerStatefulSet, c.Scheme); err != nil {
@@ -654,7 +656,7 @@ func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluste
return err
}
func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error {
clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}
var err error
@@ -684,7 +686,7 @@ func (c *ClusterReconciler) bindClusterRoles(ctx context.Context, cluster *v1alp
return err
}
func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1beta1.Cluster, serviceIP, token string) error {
config := agent.NewConfig(cluster, c.Client, c.Scheme)
var agentEnsurer agent.ResourceEnsurer
@@ -719,7 +721,7 @@ func (c *ClusterReconciler) ensureAgent(ctx context.Context, cluster *v1alpha1.C
return agentEnsurer.EnsureResources(ctx)
}
func (c *ClusterReconciler) validate(cluster *v1alpha1.Cluster, policy v1alpha1.VirtualClusterPolicy) error {
func (c *ClusterReconciler) validate(cluster *v1beta1.Cluster, policy v1beta1.VirtualClusterPolicy) error {
if cluster.Name == ClusterInvalidName {
return fmt.Errorf("%w: invalid cluster name %q", ErrClusterValidation, cluster.Name)
}
@@ -820,7 +822,7 @@ func (c *ClusterReconciler) lookupServiceCIDR(ctx context.Context) (string, erro
}
// validateCustomCACerts will make sure that all the cert secrets exists
func (c *ClusterReconciler) validateCustomCACerts(cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) validateCustomCACerts(cluster *v1beta1.Cluster) error {
credentialSources := cluster.Spec.CustomCAs.Sources
if credentialSources.ClientCA.SecretName == "" ||
credentialSources.ServerCA.SecretName == "" ||

View File

@@ -16,17 +16,17 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alpha1.Cluster) (reconcile.Result, error) {
func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta1.Cluster) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("finalizing Cluster")
// Set the Terminating phase and condition
cluster.Status.Phase = v1alpha1.ClusterTerminating
cluster.Status.Phase = v1beta1.ClusterTerminating
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -39,7 +39,7 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alph
}
// Deallocate ports for kubelet and webhook if used
if cluster.Spec.Mode == v1alpha1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
if cluster.Spec.Mode == v1beta1.SharedClusterMode && cluster.Spec.MirrorHostNodes {
log.Info("dellocating ports for kubelet and webhook")
if err := c.PortAllocator.DeallocateKubeletPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.KubeletPort); err != nil {
@@ -61,7 +61,7 @@ func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1alph
return reconcile.Result{}, nil
}
func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1alpha1.Cluster) error {
func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error {
clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"}
var err error

View File

@@ -17,7 +17,7 @@ import (
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
@@ -99,7 +99,7 @@ func buildScheme() *runtime.Scheme {
err := clientgoscheme.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme

View File

@@ -12,7 +12,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/server"
@@ -38,7 +38,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
When("creating a Cluster", func() {
It("will be created with some defaults", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
@@ -48,15 +48,15 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.Mode).To(Equal(v1alpha1.SharedClusterMode))
Expect(cluster.Spec.Mode).To(Equal(v1beta1.SharedClusterMode))
Expect(cluster.Spec.Agents).To(Equal(ptr.To[int32](0)))
Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
Expect(cluster.Spec.Version).To(BeEmpty())
Expect(cluster.Spec.Persistence.Type).To(Equal(v1alpha1.DynamicPersistenceMode))
Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("1G"))
Expect(cluster.Spec.Persistence.Type).To(Equal(v1beta1.DynamicPersistenceMode))
Expect(cluster.Spec.Persistence.StorageRequestSize).To(Equal("2G"))
Expect(cluster.Status.Phase).To(Equal(v1alpha1.ClusterUnknown))
Expect(cluster.Status.Phase).To(Equal(v1beta1.ClusterUnknown))
serverVersion, err := k8s.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
@@ -92,14 +92,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
When("exposing the cluster with nodePort", func() {
It("will have a NodePort service", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{},
},
},
}
@@ -124,14 +124,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
})
It("will have the specified ports exposed when specified", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{
ServerPort: ptr.To[int32](30010),
ETCDPort: ptr.To[int32](30011),
},
@@ -173,14 +173,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
})
It("will not expose the port when out of range", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{
ETCDPort: ptr.To[int32](2222),
},
},
@@ -218,14 +218,14 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
When("exposing the cluster with loadbalancer", func() {
It("will have a LoadBalancer service with the default ports exposed", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
LoadBalancer: &v1alpha1.LoadBalancerConfig{},
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
LoadBalancer: &v1beta1.LoadBalancerConfig{},
},
},
}
@@ -266,15 +266,15 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
When("exposing the cluster with nodePort and loadbalancer", func() {
It("will fail", func() {
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Expose: &v1alpha1.ExposeConfig{
LoadBalancer: &v1alpha1.LoadBalancerConfig{},
NodePort: &v1alpha1.NodePortConfig{},
Spec: v1beta1.ClusterSpec{
Expose: &v1beta1.ExposeConfig{
LoadBalancer: &v1beta1.LoadBalancerConfig{},
NodePort: &v1beta1.NodePortConfig{},
},
},
}

View File

@@ -0,0 +1,38 @@
package cluster
import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/predicate"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
// newClusterPredicate returns a predicate that admits only objects whose
// controller owner reference points at a v1beta1 Cluster.
func newClusterPredicate() predicate.Predicate {
	return predicate.NewPredicateFuncs(func(obj client.Object) bool {
		ref := metav1.GetControllerOf(obj)
		if ref == nil {
			return false
		}

		return ref.Kind == "Cluster" && ref.APIVersion == v1beta1.SchemeGroupVersion.String()
	})
}
// clusterNamespacedName derives the owning Cluster's namespaced name for the
// given object. The controller owner reference wins; objects that are not
// controller-owned by a v1beta1 Cluster fall back to the cluster-name label
// set by the kubelet translator. The namespace is always the object's own.
func clusterNamespacedName(obj client.Object) types.NamespacedName {
	// Default to the label; a matching owner reference overrides it below.
	name := obj.GetLabels()[translate.ClusterNameLabel]

	if ref := metav1.GetControllerOf(obj); ref != nil &&
		ref.Kind == "Cluster" &&
		ref.APIVersion == v1beta1.SchemeGroupVersion.String() {
		name = ref.Name
	}

	return types.NamespacedName{
		Name:      name,
		Namespace: obj.GetNamespace(),
	}
}

View File

@@ -2,36 +2,19 @@ package cluster
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strings"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
certutil "github.com/rancher/dynamiclistener/cert"
clientv3 "go.etcd.io/etcd/client/v3"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
"github.com/rancher/k3k/k3k-kubelet/translate"
)
const (
@@ -43,235 +26,56 @@ type PodReconciler struct {
Scheme *runtime.Scheme
}
// Add adds a new controller to the manager
// AddPodController adds a new controller for Pods to the manager.
// It will reconcile the Pods of the Host Cluster with the one of the Virtual Cluster.
func AddPodController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
// initialize a new Reconciler
reconciler := PodReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}
return ctrl.NewControllerManagedBy(mgr).
Watches(&v1.Pod{}, handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &apps.StatefulSet{}, handler.OnlyControllerOwner())).
For(&v1.Pod{}).
Named(podController).
WithEventFilter(newClusterPredicate()).
WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
Complete(&reconciler)
}
func (p *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx).WithValues("statefulset", req.NamespacedName)
ctx = ctrl.LoggerInto(ctx, log) // enrich the current logger
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling pod")
s := strings.Split(req.Name, "-")
if len(s) < 1 {
return reconcile.Result{}, nil
}
if s[0] != "k3k" {
return reconcile.Result{}, nil
}
clusterName := s[1]
var cluster v1alpha1.Cluster
if err := p.Client.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: req.Namespace}, &cluster); err != nil {
var pod v1.Pod
if err := r.Client.Get(ctx, req.NamespacedName, &pod); err != nil {
if !apierrors.IsNotFound(err) {
return reconcile.Result{}, err
}
}
matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"})
listOpts := &ctrlruntimeclient.ListOptions{Namespace: req.Namespace}
matchingLabels.ApplyToList(listOpts)
var podList v1.PodList
if err := p.Client.List(ctx, &podList, listOpts); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
if len(podList.Items) == 1 {
return reconcile.Result{}, nil
// get cluster from the object
cluster := clusterNamespacedName(&pod)
virtualClient, err := newVirtualClient(ctx, r.Client, cluster.Name, cluster.Namespace)
if err != nil {
return reconcile.Result{}, err
}
for _, pod := range podList.Items {
if err := p.handleServerPod(ctx, cluster, &pod); err != nil {
return reconcile.Result{}, err
if !pod.DeletionTimestamp.IsZero() {
virtName := pod.GetAnnotations()[translate.ResourceNameAnnotation]
virtNamespace := pod.GetAnnotations()[translate.ResourceNamespaceAnnotation]
virtPod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: virtName,
Namespace: virtNamespace,
},
}
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(virtualClient.Delete(ctx, &virtPod))
}
return reconcile.Result{}, nil
}
// handleServerPod manages the etcd-pod finalizer on a server pod: live pods
// get the finalizer added, while terminating pods are removed from the etcd
// member list (unless the owning Cluster is already gone) before the
// finalizer is released. Pods without the "server" role are skipped.
func (p *PodReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cluster, pod *v1.Pod) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("handling server pod")

	role, found := pod.Labels["role"]
	if !found {
		return fmt.Errorf("server pod has no role label")
	}

	// Only server pods are etcd members; ignore everything else.
	if role != "server" {
		log.V(1).Info("pod has a different role: " + role)
		return nil
	}

	// if etcd pod is marked for deletion then we need to remove it from the etcd member list before deletion
	if !pod.DeletionTimestamp.IsZero() {
		// check if cluster is deleted then remove the finalizer from the pod
		if cluster.Name == "" {
			if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
				controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)

				if err := p.Client.Update(ctx, pod); err != nil {
					return err
				}
			}

			return nil
		}

		tlsConfig, err := p.getETCDTLS(ctx, &cluster)
		if err != nil {
			return err
		}

		// remove server from etcd
		client, err := clientv3.New(clientv3.Config{
			Endpoints: []string{
				fmt.Sprintf("https://%s.%s:2379", server.ServiceName(cluster.Name), pod.Namespace),
			},
			TLS: tlsConfig,
		})
		if err != nil {
			return err
		}

		if err := removePeer(ctx, client, pod.Name, pod.Status.PodIP); err != nil {
			return err
		}

		// remove our finalizer from the list and update it.
		if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
			if err := p.Client.Update(ctx, pod); err != nil {
				return err
			}
		}
	}

	// Add the finalizer when it is missing so pod deletion is gated on etcd
	// member removal.
	// NOTE(review): a terminating pod that just had its finalizer removed
	// above falls through to here and appears to get it re-added — confirm
	// whether that is intentional.
	if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
		return p.Client.Update(ctx, pod)
	}

	return nil
}
// getETCDTLS builds a TLS configuration for talking to the virtual
// cluster's etcd: it reads the cluster token, decodes the bootstrap data
// to obtain the etcd server CA, and mints a client certificate from it.
func (p *PodReconciler) getETCDTLS(ctx context.Context, cluster *v1alpha1.Cluster) (*tls.Config, error) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("generating etcd TLS client certificate", "cluster", cluster)

	token, err := p.clusterToken(ctx, cluster)
	if err != nil {
		return nil, err
	}

	endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace

	var b *bootstrap.ControlRuntimeBootstrap

	// Retry every error — presumably the bootstrap endpoint may not be
	// reachable while the server is still starting; confirm against
	// k3kcontroller.Backoff semantics.
	if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool {
		return true
	}, func() error {
		var err error
		b, err = bootstrap.DecodedBootstrap(token, endpoint)

		return err
	}); err != nil {
		return nil, err
	}

	etcdCert, etcdKey, err := certs.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content)
	if err != nil {
		return nil, err
	}

	clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
	if err != nil {
		return nil, err
	}

	// create rootCA CertPool
	cert, err := certutil.ParseCertsPEM([]byte(b.ETCDServerCA.Content))
	if err != nil {
		return nil, err
	}

	pool := x509.NewCertPool()
	pool.AddCert(cert[0])

	return &tls.Config{
		RootCAs:      pool,
		Certificates: []tls.Certificate{clientCert},
	}, nil
}
// removePeer removes a peer from the cluster. The peer name and IP address
// must both match. A member already absent from etcd
// (ErrGRPCMemberNotFound) is treated as success.
func removePeer(ctx context.Context, client *clientv3.Client, name, address string) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("removing peer from cluster", "name", name, "address", address)

	// Bound the etcd round-trips so a wedged member list cannot stall the
	// reconcile loop.
	ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
	defer cancel()

	members, err := client.MemberList(ctx)
	if err != nil {
		return err
	}

	for _, member := range members.Members {
		// Member names are matched by substring against the pod name.
		if !strings.Contains(member.Name, name) {
			continue
		}

		for _, peerURL := range member.PeerURLs {
			u, err := url.Parse(peerURL)
			if err != nil {
				return err
			}

			// Only remove when the peer URL host equals the pod IP.
			if u.Hostname() == address {
				log.Info("removing member from etcd", "name", member.Name, "id", member.ID, "address", address)

				_, err := client.MemberRemove(ctx, member.ID)
				if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
					return nil
				}

				return err
			}
		}
	}

	return nil
}
// clusterToken reads the virtual cluster's join token from its token secret.
//
// NOTE(review): the TokenSecretRef branch assigns the same derived name the
// NamespacedName already holds, so it is a no-op — confirm whether it was
// meant to use the referenced secret's own name instead.
func (p *PodReconciler) clusterToken(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
	var tokenSecret v1.Secret

	nn := types.NamespacedName{
		Name:      TokenSecretName(cluster.Name),
		Namespace: cluster.Namespace,
	}

	if cluster.Spec.TokenSecretRef != nil {
		nn.Name = TokenSecretName(cluster.Name)
	}

	if err := p.Client.Get(ctx, nn, &tokenSecret); err != nil {
		return "", err
	}

	if _, ok := tokenSecret.Data["token"]; !ok {
		return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
	}

	return string(tokenSecret.Data["token"]), nil
}

View File

@@ -16,7 +16,7 @@ import (
v1 "k8s.io/api/core/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -39,7 +39,7 @@ type content struct {
// Generate generates the bootstrap for the cluster:
// 1- use the server token to get the bootstrap data from k3s
// 2- save the bootstrap data as a secret
func GenerateBootstrapData(ctx context.Context, cluster *v1alpha1.Cluster, ip, token string) ([]byte, error) {
func GenerateBootstrapData(ctx context.Context, cluster *v1beta1.Cluster, ip, token string) ([]byte, error) {
bootstrap, err := requestBootstrap(token, ip)
if err != nil {
return nil, fmt.Errorf("failed to request bootstrap secret: %w", err)
@@ -162,7 +162,7 @@ func DecodedBootstrap(token, ip string) (*ControlRuntimeBootstrap, error) {
return bootstrap, nil
}
func GetFromSecret(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster) (*ControlRuntimeBootstrap, error) {
func GetFromSecret(ctx context.Context, client client.Client, cluster *v1beta1.Cluster) (*ControlRuntimeBootstrap, error) {
key := types.NamespacedName{
Name: controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"),
Namespace: cluster.Namespace,

View File

@@ -8,7 +8,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
@@ -45,15 +45,15 @@ func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) {
}, nil
}
func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster, token string) string {
func serverConfigData(serviceIP string, cluster *v1beta1.Cluster, token string) string {
return "cluster-init: true\nserver: https://" + serviceIP + "\n" + serverOptions(cluster, token)
}
func initConfigData(cluster *v1alpha1.Cluster, token string) string {
func initConfigData(cluster *v1beta1.Cluster, token string) string {
return "cluster-init: true\n" + serverOptions(cluster, token)
}
func serverOptions(cluster *v1alpha1.Cluster, token string) string {
func serverOptions(cluster *v1beta1.Cluster, token string) string {
var opts string
// TODO: generate token if not found

View File

@@ -8,7 +8,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
@@ -22,7 +22,7 @@ func IngressName(clusterName string) string {
return controller.SafeConcatNameWithPrefix(clusterName, "ingress")
}
func Ingress(ctx context.Context, cluster *v1alpha1.Cluster) networkingv1.Ingress {
func Ingress(ctx context.Context, cluster *v1beta1.Cluster) networkingv1.Ingress {
ingress := networkingv1.Ingress{
TypeMeta: metav1.TypeMeta{
Kind: "Ingress",
@@ -52,7 +52,7 @@ func Ingress(ctx context.Context, cluster *v1alpha1.Cluster) networkingv1.Ingres
return ingress
}
func ingressRules(cluster *v1alpha1.Cluster) []networkingv1.IngressRule {
func ingressRules(cluster *v1beta1.Cluster) []networkingv1.IngressRule {
var ingressRules []networkingv1.IngressRule
if cluster.Spec.Expose == nil || cluster.Spec.Expose.Ingress == nil {

View File

@@ -18,7 +18,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
)
@@ -32,7 +32,7 @@ const (
// Server
type Server struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
client client.Client
mode string
token string
@@ -41,7 +41,7 @@ type Server struct {
imagePullSecrets []string
}
func New(cluster *v1alpha1.Cluster, client client.Client, token, image, imagePullPolicy string, imagePullSecrets []string) *Server {
func New(cluster *v1beta1.Cluster, client client.Client, token, image, imagePullPolicy string, imagePullSecrets []string) *Server {
return &Server{
cluster: cluster,
client: client,
@@ -265,7 +265,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
replicas = *s.cluster.Spec.Servers
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
if s.cluster.Spec.Persistence.Type == v1beta1.DynamicPersistenceMode {
persistent = true
pvClaim = s.setupDynamicPersistence()
}
@@ -379,7 +379,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
},
},
}
if s.cluster.Spec.Persistence.Type == v1alpha1.DynamicPersistenceMode {
if s.cluster.Spec.Persistence.Type == v1beta1.DynamicPersistenceMode {
ss.Spec.VolumeClaimTemplates = []v1.PersistentVolumeClaim{pvClaim}
}

View File

@@ -6,11 +6,11 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
func Service(cluster *v1alpha1.Cluster) *v1.Service {
func Service(cluster *v1beta1.Cluster) *v1.Service {
service := &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
@@ -69,7 +69,7 @@ func Service(cluster *v1alpha1.Cluster) *v1.Service {
}
// addLoadBalancerPorts adds the load balancer ports to the service
func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1alpha1.LoadBalancerConfig, k3sServerPort, etcdPort v1.ServicePort) {
func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1beta1.LoadBalancerConfig, k3sServerPort, etcdPort v1.ServicePort) {
// If the server port is not specified, use the default port
if loadbalancerConfig.ServerPort == nil {
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)
@@ -90,7 +90,7 @@ func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1alpha1.LoadB
}
// addNodePortPorts adds the node port ports to the service
func addNodePortPorts(service *v1.Service, nodePortConfig v1alpha1.NodePortConfig, k3sServerPort, etcdPort v1.ServicePort) {
func addNodePortPorts(service *v1.Service, nodePortConfig v1beta1.NodePortConfig, k3sServerPort, etcdPort v1.ServicePort) {
// If the server port is not specified Kubernetes will set the node port to a random port between 30000-32767
if nodePortConfig.ServerPort == nil {
service.Spec.Ports = append(service.Spec.Ports, k3sServerPort)

View File

@@ -0,0 +1,90 @@
package cluster
import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	v1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/rancher/k3k/k3k-kubelet/translate"
)
const (
serviceController = "k3k-service-controller"
)
// ServiceReconciler mirrors the LoadBalancer status of host Services back
// to the matching Services inside the virtual cluster.
type ServiceReconciler struct {
	// HostClient reads and writes objects in the host cluster.
	HostClient ctrlruntimeclient.Client
}
// AddServiceController registers the service controller with the manager.
// It watches host Services owned by a Cluster and syncs their LoadBalancer
// status to the virtual cluster.
func AddServiceController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
	reconciler := ServiceReconciler{
		HostClient: mgr.GetClient(),
	}

	return ctrl.NewControllerManagedBy(mgr).
		Named(serviceController).
		For(&v1.Service{}).
		WithEventFilter(newClusterPredicate()).
		// Apply the concurrency limit; previously the parameter was
		// accepted but silently ignored, unlike the sibling controllers.
		WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}).
		Complete(&reconciler)
}
// Reconcile copies the LoadBalancer status of a host Service to the
// corresponding Service in the virtual cluster, identified by the
// resource-name/namespace annotations the translator sets on host objects.
func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("ensuring service status to virtual cluster")

	var hostService v1.Service
	if err := r.HostClient.Get(ctx, req.NamespacedName, &hostService); err != nil {
		// A host service that no longer exists needs no mirroring.
		return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
	}

	// Some services are owned by the cluster but don't have the annotations set (i.e. the kubelet svc)
	// They don't exists in the virtual cluster, so we can skip them
	virtualServiceName, virtualServiceNameFound := hostService.Annotations[translate.ResourceNameAnnotation]
	virtualServiceNamespace, virtualServiceNamespaceFound := hostService.Annotations[translate.ResourceNamespaceAnnotation]

	if !virtualServiceNameFound || !virtualServiceNamespaceFound {
		log.V(1).Info(fmt.Sprintf("service %s/%s does not have virtual service annotations, skipping", hostService.Namespace, hostService.Name))
		return reconcile.Result{}, nil
	}

	// get cluster from the object
	cluster := clusterNamespacedName(&hostService)

	// newVirtualClient is defined elsewhere in this package — presumably it
	// returns a client targeting the virtual cluster's API server.
	virtualClient, err := newVirtualClient(ctx, r.HostClient, cluster.Name, cluster.Namespace)
	if err != nil {
		return reconcile.Result{}, fmt.Errorf("failed to get cluster info: %v", err)
	}

	// Nothing to sync for a host service that is being deleted.
	if !hostService.DeletionTimestamp.IsZero() {
		return reconcile.Result{}, nil
	}

	virtualServiceKey := types.NamespacedName{
		Name:      virtualServiceName,
		Namespace: virtualServiceNamespace,
	}

	var virtualService v1.Service
	if err := virtualClient.Get(ctx, virtualServiceKey, &virtualService); err != nil {
		return reconcile.Result{}, fmt.Errorf("failed to get virtual service: %v", err)
	}

	// Only write the status when it actually changed, to avoid no-op
	// updates against the virtual API server.
	if !equality.Semantic.DeepEqual(virtualService.Status.LoadBalancer, hostService.Status.LoadBalancer) {
		virtualService.Status.LoadBalancer = hostService.Status.LoadBalancer

		if err := virtualClient.Status().Update(ctx, &virtualService); err != nil {
			return reconcile.Result{}, err
		}
	}

	return reconcile.Result{}, nil
}

View File

@@ -0,0 +1,320 @@
package cluster
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strings"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
certutil "github.com/rancher/dynamiclistener/cert"
clientv3 "go.etcd.io/etcd/client/v3"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)
const (
statefulsetController = "k3k-statefulset-controller"
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
)
// StatefulSetReconciler manages the etcd-pod finalizers on the server pods
// of a virtual cluster's StatefulSet.
type StatefulSetReconciler struct {
	// Client reads and writes objects in the host cluster.
	Client ctrlruntimeclient.Client
	// Scheme is the runtime scheme shared with the manager.
	Scheme *runtime.Scheme
}
// AddStatefulSetController registers the StatefulSet controller with the
// manager. It watches server StatefulSets and the Pods they own in order to
// manage the etcd-pod finalizers of virtual clusters.
func AddStatefulSetController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error {
	r := StatefulSetReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}

	opts := controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}

	return ctrl.NewControllerManagedBy(mgr).
		Named(statefulsetController).
		For(&apps.StatefulSet{}).
		Owns(&v1.Pod{}).
		WithOptions(opts).
		Complete(&r)
}
// Reconcile drives the etcd-pod finalizer lifecycle for a server
// StatefulSet: it releases finalizers when the StatefulSet is deleted,
// and otherwise lets handleServerPod add/remove them per pod.
func (p *StatefulSetReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("reconciling statefulset")

	var sts apps.StatefulSet
	if err := p.Client.Get(ctx, req.NamespacedName, &sts); err != nil {
		// we can ignore the IsNotFound error
		// if the stateful set was deleted we have already cleaned up the pods
		return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
	}

	// If the StatefulSet is being deleted, we need to remove the finalizers from its pods
	// and remove the finalizer from the StatefulSet itself.
	if !sts.DeletionTimestamp.IsZero() {
		return p.handleDeletion(ctx, &sts)
	}

	// get cluster name from the object
	clusterKey := clusterNamespacedName(&sts)

	var cluster v1beta1.Cluster
	if err := p.Client.Get(ctx, clusterKey, &cluster); err != nil {
		// NotFound is tolerated: the zero-valued cluster (empty Name)
		// signals handleServerPod that the owning Cluster is gone.
		if !apierrors.IsNotFound(err) {
			return reconcile.Result{}, err
		}
	}

	podList, err := p.listPods(ctx, &sts)
	if err != nil {
		return reconcile.Result{}, err
	}

	// Single remaining server pod: if it is terminating, just release its
	// finalizer and stop — presumably no etcd member removal is needed for
	// the last member; confirm intent.
	if len(podList.Items) == 1 {
		serverPod := podList.Items[0]
		if !serverPod.DeletionTimestamp.IsZero() {
			if controllerutil.RemoveFinalizer(&serverPod, etcdPodFinalizerName) {
				if err := p.Client.Update(ctx, &serverPod); err != nil {
					return reconcile.Result{}, err
				}
			}

			return reconcile.Result{}, nil
		}
	}

	for _, pod := range podList.Items {
		if err := p.handleServerPod(ctx, cluster, &pod); err != nil {
			return reconcile.Result{}, err
		}
	}

	return reconcile.Result{}, nil
}
// handleServerPod manages the etcd-pod finalizer on a single server pod.
// Live pods get the finalizer added; terminating pods are first removed
// from the etcd member list (unless the owning Cluster is already gone)
// and only then have the finalizer released.
func (p *StatefulSetReconciler) handleServerPod(ctx context.Context, cluster v1beta1.Cluster, pod *v1.Pod) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("handling server pod")

	if pod.DeletionTimestamp.IsZero() {
		// Live pod: ensure the finalizer is present so deletion is gated
		// on the etcd member removal below.
		if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
			return p.Client.Update(ctx, pod)
		}

		return nil
	}

	// if etcd pod is marked for deletion then we need to remove it from the etcd member list before deletion
	// check if cluster is deleted then remove the finalizer from the pod
	if cluster.Name == "" {
		// Zero-valued cluster (NotFound in Reconcile): no etcd left to
		// clean up, just release the finalizer.
		if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
			if err := p.Client.Update(ctx, pod); err != nil {
				return err
			}
		}

		return nil
	}

	tlsConfig, err := p.getETCDTLS(ctx, &cluster)
	if err != nil {
		return err
	}

	// remove server from etcd
	client, err := clientv3.New(clientv3.Config{
		Endpoints: []string{
			fmt.Sprintf("https://%s.%s:2379", server.ServiceName(cluster.Name), pod.Namespace),
		},
		TLS: tlsConfig,
	})
	if err != nil {
		return err
	}

	if err := removePeer(ctx, client, pod.Name, pod.Status.PodIP); err != nil {
		return err
	}

	// remove our finalizer from the list and update it.
	if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
		if err := p.Client.Update(ctx, pod); err != nil {
			return err
		}
	}

	return nil
}
// getETCDTLS builds a TLS configuration for talking to the virtual
// cluster's etcd: it reads the cluster token, decodes the bootstrap data
// to obtain the etcd server CA, and mints a client certificate from it.
func (p *StatefulSetReconciler) getETCDTLS(ctx context.Context, cluster *v1beta1.Cluster) (*tls.Config, error) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("generating etcd TLS client certificate", "cluster", cluster)

	token, err := p.clusterToken(ctx, cluster)
	if err != nil {
		return nil, err
	}

	endpoint := server.ServiceName(cluster.Name) + "." + cluster.Namespace

	var b *bootstrap.ControlRuntimeBootstrap

	// Retry every error — presumably the bootstrap endpoint may not be
	// reachable while the server is still starting; confirm against
	// k3kcontroller.Backoff semantics.
	if err := retry.OnError(k3kcontroller.Backoff, func(err error) bool {
		return true
	}, func() error {
		var err error
		b, err = bootstrap.DecodedBootstrap(token, endpoint)

		return err
	}); err != nil {
		return nil, err
	}

	etcdCert, etcdKey, err := certs.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 0, b.ETCDServerCA.Content, b.ETCDServerCAKey.Content)
	if err != nil {
		return nil, err
	}

	clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
	if err != nil {
		return nil, err
	}

	// create rootCA CertPool
	cert, err := certutil.ParseCertsPEM([]byte(b.ETCDServerCA.Content))
	if err != nil {
		return nil, err
	}

	pool := x509.NewCertPool()
	pool.AddCert(cert[0])

	return &tls.Config{
		RootCAs:      pool,
		Certificates: []tls.Certificate{clientCert},
	}, nil
}
// removePeer removes a peer from the cluster. The peer name and IP address must both match.
func removePeer(ctx context.Context, client *clientv3.Client, name, address string) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("removing peer from cluster", "name", name, "address", address)

	// Bound the whole operation (member list + removal) by the removal timeout.
	ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
	defer cancel()

	memberList, err := client.MemberList(ctx)
	if err != nil {
		return err
	}

	for _, m := range memberList.Members {
		// Only consider members whose name contains the requested pod name.
		if !strings.Contains(m.Name, name) {
			continue
		}

		for _, rawURL := range m.PeerURLs {
			peerURL, parseErr := url.Parse(rawURL)
			if parseErr != nil {
				return parseErr
			}

			if peerURL.Hostname() != address {
				continue
			}

			log.Info("removing member from etcd", "name", m.Name, "id", m.ID, "address", address)

			_, removeErr := client.MemberRemove(ctx, m.ID)
			// A member that is already gone counts as success.
			if errors.Is(removeErr, rpctypes.ErrGRPCMemberNotFound) {
				return nil
			}

			return removeErr
		}
	}

	return nil
}
// clusterToken returns the join token for the given cluster, read from the
// "token" key of the cluster's token Secret. When the cluster references a
// user-provided Secret via Spec.TokenSecretRef, that Secret is looked up
// instead of the controller-managed one.
func (p *StatefulSetReconciler) clusterToken(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
	var tokenSecret v1.Secret

	nn := types.NamespacedName{
		Name:      TokenSecretName(cluster.Name),
		Namespace: cluster.Namespace,
	}

	// NOTE(review): the original branch reassigned TokenSecretName(cluster.Name)
	// here, which is a no-op — the user-supplied Secret name from the ref is the
	// intended lookup target. Assumes the ref exposes a Name field; confirm
	// against the v1beta1 API type.
	if cluster.Spec.TokenSecretRef != nil {
		nn.Name = cluster.Spec.TokenSecretRef.Name
	}

	if err := p.Client.Get(ctx, nn, &tokenSecret); err != nil {
		return "", err
	}

	token, ok := tokenSecret.Data["token"]
	if !ok {
		return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name)
	}

	return string(token), nil
}
// handleDeletion cleans up a StatefulSet that is being deleted: it strips the
// etcd pod finalizer from every pod the StatefulSet selects and then from the
// StatefulSet itself, allowing garbage collection to proceed.
func (p *StatefulSetReconciler) handleDeletion(ctx context.Context, sts *apps.StatefulSet) (ctrl.Result, error) {
	podList, err := p.listPods(ctx, sts)
	if err != nil {
		return reconcile.Result{}, err
	}

	// Guard against a nil list: listPods can swallow NotFound errors and
	// return no pods. Index into Items instead of taking the address of a
	// range-value copy so the Update targets the actual list element.
	if podList != nil {
		for i := range podList.Items {
			pod := &podList.Items[i]
			if controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName) {
				if err := p.Client.Update(ctx, pod); err != nil {
					return reconcile.Result{}, err
				}
			}
		}
	}

	if controllerutil.RemoveFinalizer(sts, etcdPodFinalizerName) {
		return reconcile.Result{}, p.Client.Update(ctx, sts)
	}

	return reconcile.Result{}, nil
}
// listPods returns the pods matching the StatefulSet's label selector in its
// namespace. NotFound errors are ignored; in that case an empty (but non-nil)
// list is returned so callers can iterate safely.
func (p *StatefulSetReconciler) listPods(ctx context.Context, sts *apps.StatefulSet) (*v1.PodList, error) {
	selector, err := metav1.LabelSelectorAsSelector(sts.Spec.Selector)
	if err != nil {
		return nil, fmt.Errorf("failed to create selector from statefulset: %w", err)
	}

	listOpts := &ctrlruntimeclient.ListOptions{
		Namespace:     sts.Namespace,
		LabelSelector: selector,
	}

	var podList v1.PodList
	if err := p.Client.List(ctx, &podList, listOpts); err != nil {
		// The original returned (nil, nil) on a NotFound error, which callers
		// could dereference; surface real errors and fall through to return
		// the empty list otherwise.
		if err := ctrlruntimeclient.IgnoreNotFound(err); err != nil {
			return nil, err
		}
	}

	return &podList, nil
}

View File

@@ -8,7 +8,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap"
)
@@ -24,9 +24,9 @@ const (
ReasonTerminating = "Terminating"
)
func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr error) {
func (c *ClusterReconciler) updateStatus(cluster *v1beta1.Cluster, reconcileErr error) {
if !cluster.DeletionTimestamp.IsZero() {
cluster.Status.Phase = v1alpha1.ClusterTerminating
cluster.Status.Phase = v1beta1.ClusterTerminating
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -39,7 +39,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
// Handle validation errors specifically to set the Pending phase.
if errors.Is(reconcileErr, ErrClusterValidation) {
cluster.Status.Phase = v1alpha1.ClusterPending
cluster.Status.Phase = v1beta1.ClusterPending
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -53,7 +53,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
}
if errors.Is(reconcileErr, bootstrap.ErrServerNotReady) {
cluster.Status.Phase = v1alpha1.ClusterProvisioning
cluster.Status.Phase = v1beta1.ClusterProvisioning
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -66,7 +66,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
// If there's an error, but it's not a validation error, the cluster is in a failed state.
if reconcileErr != nil {
cluster.Status.Phase = v1alpha1.ClusterFailed
cluster.Status.Phase = v1beta1.ClusterFailed
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionFalse,
@@ -80,7 +80,7 @@ func (c *ClusterReconciler) updateStatus(cluster *v1alpha1.Cluster, reconcileErr
}
// If we reach here, everything is successful.
cluster.Status.Phase = v1alpha1.ClusterReady
cluster.Status.Phase = v1beta1.ClusterReady
newCondition := metav1.Condition{
Type: ConditionReady,
Status: metav1.ConditionTrue,

View File

@@ -15,11 +15,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
)
func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
func (c *ClusterReconciler) token(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
if cluster.Spec.TokenSecretRef == nil {
return c.ensureTokenSecret(ctx, cluster)
}
@@ -42,7 +42,7 @@ func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster
return string(tokenSecret.Data["token"]), nil
}
func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) {
func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1beta1.Cluster) (string, error) {
log := ctrl.LoggerFrom(ctx)
// check if the secret is already created

View File

@@ -9,7 +9,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
const (
@@ -28,7 +28,7 @@ var Backoff = wait.Backoff{
// Image returns the rancher/k3s image tagged with the specified Version.
// If Version is empty it will use with the same k8s version of the host cluster,
// stored in the Status object. It will return the latest version as last fallback.
func K3SImage(cluster *v1alpha1.Cluster, k3SImage string) string {
func K3SImage(cluster *v1beta1.Cluster, k3SImage string) string {
image := k3SImage
imageVersion := "latest"

View File

@@ -7,12 +7,12 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
func Test_K3S_Image(t *testing.T) {
type args struct {
cluster *v1alpha1.Cluster
cluster *v1beta1.Cluster
k3sImage string
}
@@ -25,12 +25,12 @@ func Test_K3S_Image(t *testing.T) {
name: "cluster with assigned version spec",
args: args{
k3sImage: "rancher/k3s",
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
Version: "v1.2.3",
},
},
@@ -41,12 +41,12 @@ func Test_K3S_Image(t *testing.T) {
name: "cluster with empty version spec and assigned hostVersion status",
args: args{
k3sImage: "rancher/k3s",
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",
},
Status: v1alpha1.ClusterStatus{
Status: v1beta1.ClusterStatus{
HostVersion: "v4.5.6",
},
},
@@ -57,7 +57,7 @@ func Test_K3S_Image(t *testing.T) {
name: "cluster with empty version spec and empty hostVersion status",
args: args{
k3sImage: "rancher/k3s",
cluster: &v1alpha1.Cluster{
cluster: &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: "ns-1",

View File

@@ -17,7 +17,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/cluster/server"
@@ -39,7 +39,7 @@ func New() *KubeConfig {
}
}
func (k *KubeConfig) Generate(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string, port int) (*clientcmdapi.Config, error) {
func (k *KubeConfig) Generate(ctx context.Context, client client.Client, cluster *v1beta1.Cluster, hostServerIP string, port int) (*clientcmdapi.Config, error) {
bootstrapData, err := bootstrap.GetFromSecret(ctx, client, cluster)
if err != nil {
return nil, err
@@ -93,7 +93,7 @@ func NewConfig(url string, serverCA, clientCert, clientKey []byte) *clientcmdapi
return config
}
func getURLFromService(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string, serverPort int) (string, error) {
func getURLFromService(ctx context.Context, client client.Client, cluster *v1beta1.Cluster, hostServerIP string, serverPort int) (string, error) {
// get the server service to extract the right IP
key := types.NamespacedName{
Name: server.ServiceName(cluster.Name),

View File

@@ -11,11 +11,11 @@ import (
networkingv1 "k8s.io/api/networking/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)
// reconcileNamespacePodSecurityLabels will update the labels of the namespace to reconcile the PSA level specified in the VirtualClusterPolicy
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) {
func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1beta1.VirtualClusterPolicy) {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling PSA labels")
@@ -33,7 +33,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx
namespace.Labels["pod-security.kubernetes.io/enforce-version"] = "latest"
// skip the 'warn' only for the privileged PSA level
if psaLevel != v1alpha1.PrivilegedPodSecurityAdmissionLevel {
if psaLevel != v1beta1.PrivilegedPodSecurityAdmissionLevel {
namespace.Labels["pod-security.kubernetes.io/warn"] = string(psaLevel)
namespace.Labels["pod-security.kubernetes.io/warn-version"] = "latest"
}

View File

@@ -11,11 +11,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
)
func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling NetworkPolicy")
@@ -59,7 +59,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Cont
return err
}
func networkPolicy(namespaceName string, policy *v1alpha1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy {
func networkPolicy(namespaceName string, policy *v1beta1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy {
return &networkingv1.NetworkPolicy{
TypeMeta: metav1.TypeMeta{
Kind: "NetworkPolicy",

View File

@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
)
@@ -46,10 +46,10 @@ func Add(mgr manager.Manager, clusterCIDR string, maxConcurrentReconciles int) e
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.VirtualClusterPolicy{}).
For(&v1beta1.VirtualClusterPolicy{}).
Watches(&v1.Namespace{}, namespaceEventHandler()).
Watches(&v1.Node{}, nodeEventHandler(&reconciler)).
Watches(&v1alpha1.Cluster{}, clusterEventHandler(&reconciler)).
Watches(&v1beta1.Cluster{}, clusterEventHandler(&reconciler)).
Owns(&networkingv1.NetworkPolicy{}).
Owns(&v1.ResourceQuota{}).
Owns(&v1.LimitRange{}).
@@ -129,7 +129,7 @@ func namespaceEventHandler() handler.Funcs {
func nodeEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
// enqueue all the available VirtualClusterPolicies
enqueueAllVCPs := func(ctx context.Context, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
vcpList := &v1alpha1.VirtualClusterPolicyList{}
vcpList := &v1beta1.VirtualClusterPolicyList{}
if err := r.Client.List(ctx, vcpList); err != nil {
return
}
@@ -193,7 +193,7 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
return handler.Funcs{
// When a Cluster is created, if its Namespace has the "policy.k3k.io/policy-name" label
CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
cluster, ok := e.Object.(*v1alpha1.Cluster)
cluster, ok := e.Object.(*v1beta1.Cluster)
if !ok {
return
}
@@ -210,8 +210,8 @@ func clusterEventHandler(r *VirtualClusterPolicyReconciler) handler.Funcs {
// When a Cluster is updated, if its Namespace has the "policy.k3k.io/policy-name" label
// and if some of its spec influenced by the policy changed
UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
oldCluster, okOld := e.ObjectOld.(*v1alpha1.Cluster)
newCluster, okNew := e.ObjectNew.(*v1alpha1.Cluster)
oldCluster, okOld := e.ObjectOld.(*v1beta1.Cluster)
newCluster, okNew := e.ObjectNew.(*v1beta1.Cluster)
if !okOld || !okNew {
return
@@ -250,7 +250,7 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling VirtualClusterPolicy")
var policy v1alpha1.VirtualClusterPolicy
var policy v1beta1.VirtualClusterPolicy
if err := c.Client.Get(ctx, req.NamespacedName, &policy); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
@@ -281,7 +281,7 @@ func (c *VirtualClusterPolicyReconciler) Reconcile(ctx context.Context, req reco
return reconcile.Result{}, nil
}
func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx context.Context, policy *v1beta1.VirtualClusterPolicy) error {
if err := c.reconcileMatchingNamespaces(ctx, policy); err != nil {
return err
}
@@ -293,7 +293,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileVirtualClusterPolicy(ctx conte
return nil
}
func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context.Context, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context.Context, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling matching Namespaces")
@@ -340,7 +340,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileMatchingNamespaces(ctx context
return nil
}
func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling ResourceQuota")
@@ -389,7 +389,7 @@ func (c *VirtualClusterPolicyReconciler) reconcileQuota(ctx context.Context, nam
return err
}
func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling LimitRange")
@@ -437,11 +437,11 @@ func (c *VirtualClusterPolicyReconciler) reconcileLimit(ctx context.Context, nam
return err
}
func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) error {
func (c *VirtualClusterPolicyReconciler) reconcileClusters(ctx context.Context, namespace *v1.Namespace, policy *v1beta1.VirtualClusterPolicy) error {
log := ctrl.LoggerFrom(ctx)
log.Info("reconciling Clusters")
var clusters v1alpha1.ClusterList
var clusters v1beta1.ClusterList
if err := c.Client.List(ctx, &clusters, client.InNamespace(namespace.Name)); err != nil {
return err
}

View File

@@ -16,7 +16,7 @@ import (
networkingv1 "k8s.io/api/networking/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/policy"
. "github.com/onsi/ginkgo/v2"
@@ -81,7 +81,7 @@ func buildScheme() *runtime.Scheme {
Expect(err).NotTo(HaveOccurred())
err = networkingv1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme

View File

@@ -15,7 +15,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
k3kcontroller "github.com/rancher/k3k/pkg/controller"
"github.com/rancher/k3k/pkg/controller/policy"
@@ -26,25 +26,25 @@ import (
var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("VirtualClusterPolicy"), func() {
Context("creating a VirtualClusterPolicy", func() {
It("should have the 'shared' allowedMode", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
Expect(policy.Spec.AllowedMode).To(Equal(v1alpha1.SharedClusterMode))
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
Expect(policy.Spec.AllowedMode).To(Equal(v1beta1.SharedClusterMode))
})
It("should have the 'virtual' mode if specified", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
AllowedMode: v1alpha1.VirtualClusterMode,
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
AllowedMode: v1beta1.VirtualClusterMode,
})
Expect(policy.Spec.AllowedMode).To(Equal(v1alpha1.VirtualClusterMode))
Expect(policy.Spec.AllowedMode).To(Equal(v1beta1.VirtualClusterMode))
})
It("should fail for a non-existing mode", func() {
policy := &v1alpha1.VirtualClusterPolicy{
policy := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "policy-",
},
Spec: v1alpha1.VirtualClusterPolicySpec{
AllowedMode: v1alpha1.ClusterMode("non-existing"),
Spec: v1beta1.VirtualClusterPolicySpec{
AllowedMode: v1beta1.ClusterMode("non-existing"),
},
}
@@ -67,7 +67,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should create a NetworkPolicy", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
bindPolicyToNamespace(namespace, policy)
// look for network policies etc
@@ -122,7 +122,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should recreate the NetworkPolicy if deleted", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{})
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{})
bindPolicyToNamespace(namespace, policy)
// look for network policy
@@ -164,12 +164,12 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
It("should add and update the proper pod-security labels to the namespace", func() {
var (
privileged = v1alpha1.PrivilegedPodSecurityAdmissionLevel
baseline = v1alpha1.BaselinePodSecurityAdmissionLevel
restricted = v1alpha1.RestrictedPodSecurityAdmissionLevel
privileged = v1beta1.PrivilegedPodSecurityAdmissionLevel
baseline = v1beta1.BaselinePodSecurityAdmissionLevel
restricted = v1beta1.RestrictedPodSecurityAdmissionLevel
)
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
PodSecurityAdmissionLevel: &privileged,
})
@@ -264,9 +264,9 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should restore the labels if Namespace is updated", func() {
privileged := v1alpha1.PrivilegedPodSecurityAdmissionLevel
privileged := v1beta1.PrivilegedPodSecurityAdmissionLevel
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
PodSecurityAdmissionLevel: &privileged,
})
@@ -308,19 +308,19 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should update Cluster's PriorityClass", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultPriorityClass: "foobar",
})
bindPolicyToNamespace(namespace, policy)
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
@@ -342,7 +342,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should update Cluster's NodeSelector", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
})
bindPolicyToNamespace(namespace, policy)
@@ -350,13 +350,13 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
err := k8sClient.Update(ctx, policy)
Expect(err).To(Not(HaveOccurred()))
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
@@ -378,18 +378,18 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should update the nodeSelector if changed", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
DefaultNodeSelector: map[string]string{"label-1": "value-1"},
})
bindPolicyToNamespace(namespace, policy)
cluster := &v1alpha1.Cluster{
cluster := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
NodeSelector: map[string]string{"label-1": "value-1"},
@@ -426,7 +426,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
// wait a bit and check it's restored
Eventually(func() bool {
var updatedCluster v1alpha1.Cluster
var updatedCluster v1beta1.Cluster
key := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}
err = k8sClient.Get(ctx, key, &updatedCluster)
@@ -439,7 +439,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should create a ResourceQuota if Quota is enabled", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
@@ -467,7 +467,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should delete the ResourceQuota if Quota is deleted", func() {
policy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
policy := newPolicy(v1beta1.VirtualClusterPolicySpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
@@ -513,7 +513,7 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
It("should delete the ResourceQuota if unbound", func() {
clusterPolicy := newPolicy(v1alpha1.VirtualClusterPolicySpec{
clusterPolicy := newPolicy(v1beta1.VirtualClusterPolicySpec{
Quota: &v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("800m"),
@@ -558,10 +558,10 @@ var _ = Describe("VirtualClusterPolicy Controller", Label("controller"), Label("
})
})
func newPolicy(spec v1alpha1.VirtualClusterPolicySpec) *v1alpha1.VirtualClusterPolicy {
func newPolicy(spec v1beta1.VirtualClusterPolicySpec) *v1beta1.VirtualClusterPolicy {
GinkgoHelper()
policy := &v1alpha1.VirtualClusterPolicy{
policy := &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "policy-",
},
@@ -574,7 +574,7 @@ func newPolicy(spec v1alpha1.VirtualClusterPolicySpec) *v1alpha1.VirtualClusterP
return policy
}
func bindPolicyToNamespace(namespace *v1.Namespace, pol *v1alpha1.VirtualClusterPolicy) {
func bindPolicyToNamespace(namespace *v1.Namespace, pol *v1beta1.VirtualClusterPolicy) {
GinkgoHelper()
if len(namespace.Labels) == 0 {

View File

@@ -3,50 +3,36 @@ package log
import (
"os"
"github.com/virtual-kubelet/virtual-kubelet/log"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
ctrlruntimezap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)
type Logger struct {
*zap.SugaredLogger
}
func New(debug bool) *Logger {
return &Logger{newZappLogger(debug).Sugar()}
}
func (l *Logger) WithError(err error) log.Logger {
return l
}
func (l *Logger) WithField(string, any) log.Logger {
return l
}
func (l *Logger) WithFields(field log.Fields) log.Logger {
return l
}
func (l *Logger) Named(name string) *Logger {
l.SugaredLogger = l.SugaredLogger.Named(name)
return l
}
func newZappLogger(debug bool) *zap.Logger {
encCfg := zap.NewProductionEncoderConfig()
encCfg.TimeKey = "timestamp"
encCfg.EncodeTime = zapcore.ISO8601TimeEncoder
func New(debug bool, format string) *zap.Logger {
lvl := zap.NewAtomicLevelAt(zap.InfoLevel)
if debug {
lvl = zap.NewAtomicLevelAt(zap.DebugLevel)
}
encoder := zapcore.NewJSONEncoder(encCfg)
core := zapcore.NewCore(&ctrlruntimezap.KubeAwareEncoder{Encoder: encoder}, zapcore.AddSync(os.Stderr), lvl)
encoder := newEncoder(format)
core := zapcore.NewCore(encoder, zapcore.AddSync(os.Stderr), lvl)
return zap.New(core)
}
func newEncoder(format string) zapcore.Encoder {
encCfg := zap.NewProductionEncoderConfig()
encCfg.TimeKey = "timestamp"
encCfg.EncodeTime = zapcore.ISO8601TimeEncoder
var encoder zapcore.Encoder
if format == "console" {
encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
encoder = zapcore.NewConsoleEncoder(encCfg)
} else {
encoder = zapcore.NewJSONEncoder(encCfg)
}
return &ctrlruntimezap.KubeAwareEncoder{Encoder: encoder}
}

113
tests/cluster_certs_test.go Normal file
View File

@@ -0,0 +1,113 @@
package k3k_test
import (
"context"
"os"
"strings"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
// create custom cert secret
customCertDir := "testdata/customcerts/"
certList := []string{
"server-ca",
"client-ca",
"request-header-ca",
"service",
"etcd-peer-ca",
"etcd-server-ca",
}
for _, certName := range certList {
var cert, key []byte
var err error
filePathPrefix := ""
certfile := certName
if strings.HasPrefix(certName, "etcd") {
filePathPrefix = "etcd/"
certfile = strings.TrimPrefix(certName, "etcd-")
}
if !strings.Contains(certName, "service") {
cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt")
Expect(err).To(Not(HaveOccurred()))
}
key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key")
Expect(err).To(Not(HaveOccurred()))
certSecret := caCertSecret(certName, namespace.Name, cert, key)
err = k8sClient.Create(ctx, certSecret)
Expect(err).To(Not(HaveOccurred()))
}
cluster := NewCluster(namespace.Name)
cluster.Spec.CustomCAs = v1beta1.CustomCAs{
Enabled: true,
Sources: v1beta1.CredentialSources{
ServerCA: v1beta1.CredentialSource{
SecretName: "server-ca",
},
ClientCA: v1beta1.CredentialSource{
SecretName: "client-ca",
},
ETCDServerCA: v1beta1.CredentialSource{
SecretName: "etcd-server-ca",
},
ETCDPeerCA: v1beta1.CredentialSource{
SecretName: "etcd-peer-ca",
},
RequestHeaderCA: v1beta1.CredentialSource{
SecretName: "request-header-ca",
},
ServiceAccountToken: v1beta1.CredentialSource{
SecretName: "service",
},
},
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
It("will load the custom certs in the server pod", func() {
ctx := context.Background()
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
// check server-ca.crt
serverCACrtPath := "/var/lib/rancher/k3s/server/tls/server-ca.crt"
serverCACrt, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, serverCACrtPath)
Expect(err).To(Not(HaveOccurred()))
serverCACrtTestFile, err := os.ReadFile("testdata/customcerts/server-ca.crt")
Expect(err).To(Not(HaveOccurred()))
Expect(serverCACrt).To(Equal(serverCACrtTestFile))
})
})

View File

@@ -0,0 +1,215 @@
package k3k_test
import (
"context"
"crypto/x509"
"errors"
"fmt"
"time"
"k8s.io/utils/ptr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// e2e: behavior of an ephemeral (non-persistent) virtual cluster. A fresh
// cluster is created per spec and its namespace is deleted afterwards.
var _ = When("an ephemeral cluster is installed", Label("e2e"), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
// NewVirtualCluster creates an ephemeral-persistence cluster in a new namespace.
virtualCluster = NewVirtualCluster()
})
AfterEach(func() {
// Cleanup: deleting the namespace tears down the cluster and all its pods.
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
_, _ = virtualCluster.NewNginxPod("")
})
It("deletes the pod in the virtual cluster when deleted from the host", func() {
ctx := context.Background()
pod, _ := virtualCluster.NewNginxPod("")
// Translate the virtual-cluster pod name to its host-cluster counterpart.
hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster)
namespacedName := hostTranslator.NamespacedName(pod)
// Delete the backing pod on the host cluster...
err := k8s.CoreV1().Pods(namespacedName.Namespace).Delete(ctx, namespacedName.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
// ...and expect the deletion to be propagated into the virtual cluster.
Eventually(func() bool {
_, err := virtualCluster.Client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, v1.GetOptions{})
return apierrors.IsNotFound(err)
}).
WithPolling(time.Second * 5).
WithTimeout(time.Minute).
Should(BeTrue())
})
It("regenerates the bootstrap secret after a restart", func() {
ctx := context.Background()
_, err := virtualCluster.Client.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
// NOTE(review): a nil DeletionTimestamp can also be observed before the
// deletion is reflected, so this wait may pass early — comparing pod UIDs
// would prove a replacement pod was actually seen.
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeNil())
By("Server pod up and running again")
// With ephemeral persistence the restarted server generates new bootstrap
// data, so the old client credentials must fail TLS verification.
By("Using old k8s client configuration should fail")
Eventually(func() bool {
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
var unknownAuthorityErr x509.UnknownAuthorityError
return errors.As(err, &unknownAuthorityErr)
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeTrue())
By("Recover new config should succeed")
// Rebuilding the client from the regenerated bootstrap data should work.
Eventually(func() error {
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeNil())
})
})
// e2e: behavior of a cluster using dynamic (volume-backed) persistence,
// including full teardown and bootstrap-secret reuse across restarts.
var _ = When("a dynamic cluster is installed", Label("e2e"), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
virtualCluster = NewVirtualClusterWithType(v1beta1.DynamicPersistenceMode)
})
AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
_, _ = virtualCluster.NewNginxPod("")
})
It("can delete the cluster", func() {
ctx := context.Background()
By("Deleting cluster")
err := k8sClient.Delete(ctx, virtualCluster.Cluster)
Expect(err).To(Not(HaveOccurred()))
// All pods in the namespace must eventually be cleaned up.
Eventually(func() []corev1.Pod {
By("listing the pods in the namespace")
podList, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
GinkgoLogr.Info("podlist", "len", len(podList.Items))
return podList.Items
}).
WithTimeout(2 * time.Minute).
WithPolling(time.Second).
Should(BeEmpty())
})
It("can delete a HA cluster", func() {
ctx := context.Background()
// This spec provisions its own 2-server (HA) cluster instead of using the
// single-server one created in BeforeEach.
namespace := NewNamespace()
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", namespace.Name))
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = v1beta1.DynamicPersistenceMode
cluster.Spec.Servers = ptr.To[int32](2)
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
By(fmt.Sprintf("Created virtual cluster %s/%s", cluster.Namespace, cluster.Name))
// NOTE(review): this := shadows the outer virtualCluster, so AfterEach
// only deletes the BeforeEach cluster's namespace; confirm the HA
// cluster's namespace is cleaned up elsewhere (e.g. by an e2e-label sweep).
virtualCluster := &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
By("Deleting cluster")
err := k8sClient.Delete(ctx, virtualCluster.Cluster)
Expect(err).To(Not(HaveOccurred()))
Eventually(func() []corev1.Pod {
By("listing the pods in the namespace")
podList, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
GinkgoLogr.Info("podlist", "len", len(podList.Items))
return podList.Items
}).
WithTimeout(time.Minute * 3).
WithPolling(time.Second).
Should(BeEmpty())
})
It("uses the same bootstrap secret after a restart", func() {
ctx := context.Background()
_, err := virtualCluster.Client.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
// restartServerPod (defined elsewhere in the suite) deletes the server pod
// and waits for its replacement.
restartServerPod(ctx, virtualCluster)
By("Server pod up and running again")
// With persistence enabled the bootstrap data is reused, so the
// pre-restart client credentials must remain valid.
By("Using old k8s client configuration should succeed")
Eventually(func() error {
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(2 * time.Minute).
WithPolling(time.Second * 5).
Should(BeNil())
})
})

View File

@@ -10,7 +10,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/policy"
@@ -21,7 +21,7 @@ import (
var _ = When("a cluster's status is tracked", Label("e2e"), func() {
var (
namespace *corev1.Namespace
vcp *v1alpha1.VirtualClusterPolicy
vcp *v1beta1.VirtualClusterPolicy
)
// This BeforeEach/AfterEach will create a new namespace and a default policy for each test.
@@ -29,7 +29,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
ctx := context.Background()
namespace = NewNamespace()
vcp = &v1alpha1.VirtualClusterPolicy{
vcp = &v1beta1.VirtualClusterPolicy{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "policy-",
},
@@ -53,7 +53,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
It("should start with Provisioning status and transition to Ready", func() {
ctx := context.Background()
clusterObj := &v1alpha1.Cluster{
clusterObj := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "status-cluster-",
Namespace: namespace.Name,
@@ -68,7 +68,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterProvisioning))
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterProvisioning))
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())
@@ -84,7 +84,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterReady))
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterReady))
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())
@@ -101,13 +101,13 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
It("should be in Pending status with ValidationFailed reason", func() {
ctx := context.Background()
clusterObj := &v1alpha1.Cluster{
clusterObj := &v1beta1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace.Name,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.VirtualClusterMode,
Spec: v1beta1.ClusterSpec{
Mode: v1beta1.VirtualClusterMode,
},
}
Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed())
@@ -119,7 +119,7 @@ var _ = When("a cluster's status is tracked", Label("e2e"), func() {
err := k8sClient.Get(ctx, clusterKey, clusterObj)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(clusterObj.Status.Phase).To(Equal(v1alpha1.ClusterPending))
g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterPending))
cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady)
g.Expect(cond).NotTo(BeNil())

View File

@@ -1,277 +0,0 @@
package k3k_test
import (
"context"
"crypto/x509"
"errors"
"os"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Sanity check for the whole suite: the k3k controller must be up and
// running in the k3k-system namespace of the host cluster.
var _ = When("k3k is installed", Label("e2e"), func() {
	It("is in Running status", func() {
		Eventually(func() bool {
			listOpts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
			pods, err := k8s.CoreV1().Pods("k3k-system").List(context.Background(), listOpts)
			Expect(err).To(Not(HaveOccurred()))
			Expect(pods.Items).To(Not(BeEmpty()))

			// Succeed as soon as any controller pod reports the Running phase.
			for _, p := range pods.Items {
				if p.Status.Phase == corev1.PodRunning {
					return true
				}
			}

			return false
		}).
			WithTimeout(time.Second * 10).
			WithPolling(time.Second).
			Should(BeTrue())
	})
})
// e2e: an ephemeral (non-persistent) cluster regenerates its bootstrap
// secret when the server restarts, invalidating old client credentials.
var _ = When("a ephemeral cluster is installed", Label("e2e"), func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
virtualCluster = NewVirtualCluster()
})
AfterEach(func() {
// Cleanup: deleting the namespace tears down the cluster and its pods.
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
_, _ = virtualCluster.NewNginxPod("")
})
It("regenerates the bootstrap secret after a restart", func() {
ctx := context.Background()
_, err := virtualCluster.Client.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeNil())
By("Server pod up and running again")
// Without persistence the restarted server has new credentials, so the old
// client must fail with an unknown-authority TLS error.
By("Using old k8s client configuration should fail")
Eventually(func() bool {
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
var unknownAuthorityErr x509.UnknownAuthorityError
return errors.As(err, &unknownAuthorityErr)
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeTrue())
By("Recover new config should succeed")
// Rebuilding the client from the regenerated bootstrap data should work.
Eventually(func() error {
virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster)
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeNil())
})
})
// e2e: a cluster with dynamic persistence keeps its bootstrap secret across
// a server restart, so old client credentials stay valid.
// NOTE(review): unlike the sibling suites this When has no Label("e2e") —
// confirm whether that is intentional.
var _ = When("a dynamic cluster is installed", func() {
var virtualCluster *VirtualCluster
BeforeEach(func() {
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = v1alpha1.DynamicPersistenceMode
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
AfterEach(func() {
DeleteNamespaces(virtualCluster.Cluster.Namespace)
})
It("can create a nginx pod", func() {
_, _ = virtualCluster.NewNginxPod("")
})
It("use the same bootstrap secret after a restart", func() {
ctx := context.Background()
_, err := virtualCluster.Client.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)
err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
Expect(err).To(Not(HaveOccurred()))
By("Deleting server pod")
// check that the server pods restarted
Eventually(func() any {
serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
return serverPods.Items[0].DeletionTimestamp
}).
WithTimeout(60 * time.Second).
WithPolling(time.Second * 5).
Should(BeNil())
By("Server pod up and running again")
// With persistence the restarted server reuses the stored bootstrap
// secret, so the pre-restart client keeps working.
By("Using old k8s client configuration should succeed")
Eventually(func() error {
_, err = virtualCluster.Client.DiscoveryClient.ServerVersion()
return err
}).
WithTimeout(2 * time.Minute).
WithPolling(time.Second * 5).
Should(BeNil())
})
})
// e2e: a cluster configured with CustomCAs sourced from individual per-CA
// secrets should load those certificates into the k3s server pod.
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), func() {
ctx := context.Background()
var virtualCluster *VirtualCluster
BeforeEach(func() {
namespace := NewNamespace()
// create custom cert secret
customCertDir := "testdata/customcerts/"
certList := []string{
"server-ca",
"client-ca",
"request-header-ca",
"service",
"etcd-peer-ca",
"etcd-server-ca",
}
for _, certName := range certList {
var cert, key []byte
var err error
filePathPrefix := ""
certfile := certName
// etcd CAs are stored under the etcd/ subdirectory, with the "etcd-"
// prefix stripped from the file name.
if strings.HasPrefix(certName, "etcd") {
filePathPrefix = "etcd/"
certfile = strings.TrimPrefix(certName, "etcd-")
}
// "service" (service-account token) only ships a key file, no .crt.
if !strings.Contains(certName, "service") {
cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt")
Expect(err).To(Not(HaveOccurred()))
}
key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key")
Expect(err).To(Not(HaveOccurred()))
certSecret := caCertSecret(certName, namespace.Name, cert, key)
err = k8sClient.Create(ctx, certSecret)
Expect(err).To(Not(HaveOccurred()))
}
cluster := NewCluster(namespace.Name)
// Point each CA source at the secret created above (secret name == CA name).
cluster.Spec.CustomCAs = v1alpha1.CustomCAs{
Enabled: true,
Sources: v1alpha1.CredentialSources{
ServerCA: v1alpha1.CredentialSource{
SecretName: "server-ca",
},
ClientCA: v1alpha1.CredentialSource{
SecretName: "client-ca",
},
ETCDServerCA: v1alpha1.CredentialSource{
SecretName: "etcd-server-ca",
},
ETCDPeerCA: v1alpha1.CredentialSource{
SecretName: "etcd-peer-ca",
},
RequestHeaderCA: v1alpha1.CredentialSource{
SecretName: "request-header-ca",
},
ServiceAccountToken: v1alpha1.CredentialSource{
SecretName: "service",
},
},
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
})
// NOTE(review): no AfterEach here — the cluster namespace is not deleted by
// this suite; confirm cleanup happens elsewhere.
It("will load the custom certs in the server pod", func() {
_, _ = virtualCluster.NewNginxPod("")
labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
Expect(err).To(Not(HaveOccurred()))
Expect(len(serverPods.Items)).To(Equal(1))
serverPod := serverPods.Items[0]
// check server-ca.crt
// Read the CA from inside the server pod and compare byte-for-byte with
// the fixture file that seeded the secret.
serverCACrtPath := "/var/lib/rancher/k3s/server/tls/server-ca.crt"
serverCACrt, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, serverCACrtPath)
Expect(err).To(Not(HaveOccurred()))
serverCACrtTestFile, err := os.ReadFile("testdata/customcerts/server-ca.crt")
Expect(err).To(Not(HaveOccurred()))
Expect(serverCACrt).To(Equal(serverCACrtTestFile))
})
})

View File

@@ -0,0 +1,582 @@
package k3k_test
import (
"context"
"strings"
"time"
"k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/utils/ptr"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// e2e (shared mode): updating .spec.serverEnvs / .spec.agentEnvs on a Cluster
// must be reflected in the server and agent pods: changed values updated,
// removed vars dropped, new vars added.
var _ = When("a shared mode cluster update its envs", Label("e2e"), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
// Add initial environment variables for server
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_SERVER_ENV_2",
Value: "toBeRemoved",
},
}
// Add initial environment variables for agent
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_AGENT_ENV_2",
Value: "toBeRemoved",
},
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
// Sanity-check the initial env vars landed on the server pod...
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
serverEnv1, ok := getEnv(&serverPod, "TEST_SERVER_ENV_1")
Expect(ok).To(BeTrue())
Expect(serverEnv1).To(Equal("not_upgraded"))
serverEnv2, ok := getEnv(&serverPod, "TEST_SERVER_ENV_2")
Expect(ok).To(BeTrue())
Expect(serverEnv2).To(Equal("toBeRemoved"))
// ...and on the agent pod.
aPods := listAgentPods(ctx, virtualCluster)
Expect(len(aPods)).To(Equal(1))
agentPod := aPods[0]
agentEnv1, ok := getEnv(&agentPod, "TEST_AGENT_ENV_1")
Expect(ok).To(BeTrue())
Expect(agentEnv1).To(Equal("not_upgraded"))
agentEnv2, ok := getEnv(&agentPod, "TEST_AGENT_ENV_2")
Expect(ok).To(BeTrue())
Expect(agentEnv2).To(Equal("toBeRemoved"))
})
It("will update server and agent envs when cluster is updated", func() {
// The whole Get/Update/verify sequence is retried: the Update may hit a
// resourceVersion conflict and the pods take time to be recreated.
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
// update both agent and server envs
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_SERVER_ENV_3",
Value: "new",
},
}
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_AGENT_ENV_3",
Value: "new",
},
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverEnv1, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv1).To(Equal("upgraded"))
_, ok = getEnv(&serverPods[0], "TEST_SERVER_ENV_2")
g.Expect(ok).To(BeFalse())
serverEnv3, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv3).To(Equal("new"))
// agent pods
aPods := listAgentPods(ctx, virtualCluster)
g.Expect(len(aPods)).To(Equal(1))
agentEnv1, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv1).To(Equal("upgraded"))
_, ok = getEnv(&aPods[0], "TEST_AGENT_ENV_2")
g.Expect(ok).To(BeFalse())
agentEnv3, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv3).To(Equal("new"))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
// e2e (shared mode): updating .spec.serverArgs must roll the server pod with
// the new CLI arguments.
var _ = When("a shared mode cluster update its server args", Label("e2e"), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
// Add initial args for server
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=not_upgraded",
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
// Sanity-check the initial arg is present on the server pod.
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
Expect(isArgFound(&serverPod, "--node-label=test_server=not_upgraded")).To(BeTrue())
})
It("will update server args", func() {
// Retried as a whole: the Update can hit a conflict and the pod needs
// time to be recreated with the new args.
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=upgraded",
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
sPods := listServerPods(ctx, virtualCluster)
g.Expect(len(sPods)).To(Equal(1))
g.Expect(isArgFound(&sPods[0], "--node-label=test_server=upgraded")).To(BeTrue())
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
// e2e (virtual mode): same env-update coverage as the shared-mode suite, but
// with Mode=virtual and one dedicated agent pod.
var _ = When("a virtual mode cluster update its envs", Label("e2e"), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
// Add initial environment variables for server
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_SERVER_ENV_2",
Value: "toBeRemoved",
},
}
// Add initial environment variables for agent
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "not_upgraded",
},
{
Name: "TEST_AGENT_ENV_2",
Value: "toBeRemoved",
},
}
// Virtual mode with a single agent.
cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To(int32(1))
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
// Sanity-check the initial env vars landed on the server pod...
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
serverEnv1, ok := getEnv(&serverPod, "TEST_SERVER_ENV_1")
Expect(ok).To(BeTrue())
Expect(serverEnv1).To(Equal("not_upgraded"))
serverEnv2, ok := getEnv(&serverPod, "TEST_SERVER_ENV_2")
Expect(ok).To(BeTrue())
Expect(serverEnv2).To(Equal("toBeRemoved"))
// ...and on the agent pod.
aPods := listAgentPods(ctx, virtualCluster)
Expect(len(aPods)).To(Equal(1))
agentPod := aPods[0]
agentEnv1, ok := getEnv(&agentPod, "TEST_AGENT_ENV_1")
Expect(ok).To(BeTrue())
Expect(agentEnv1).To(Equal("not_upgraded"))
agentEnv2, ok := getEnv(&agentPod, "TEST_AGENT_ENV_2")
Expect(ok).To(BeTrue())
Expect(agentEnv2).To(Equal("toBeRemoved"))
})
It("will update server and agent envs when cluster is updated", func() {
// The whole Get/Update/verify sequence is retried: the Update may hit a
// resourceVersion conflict and the pods take time to be recreated.
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
// update both agent and server envs
cluster.Spec.ServerEnvs = []v1.EnvVar{
{
Name: "TEST_SERVER_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_SERVER_ENV_3",
Value: "new",
},
}
cluster.Spec.AgentEnvs = []v1.EnvVar{
{
Name: "TEST_AGENT_ENV_1",
Value: "upgraded",
},
{
Name: "TEST_AGENT_ENV_3",
Value: "new",
},
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverEnv1, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv1).To(Equal("upgraded"))
_, ok = getEnv(&serverPods[0], "TEST_SERVER_ENV_2")
g.Expect(ok).To(BeFalse())
serverEnv3, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(serverEnv3).To(Equal("new"))
// agent pods
aPods := listAgentPods(ctx, virtualCluster)
g.Expect(len(aPods)).To(Equal(1))
agentEnv1, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_1")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv1).To(Equal("upgraded"))
_, ok = getEnv(&aPods[0], "TEST_AGENT_ENV_2")
g.Expect(ok).To(BeFalse())
agentEnv3, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_3")
g.Expect(ok).To(BeTrue())
g.Expect(agentEnv3).To(Equal("new"))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
// e2e (virtual mode): updating .spec.serverArgs must roll the server pod with
// the new CLI arguments.
var _ = When("a virtual mode cluster update its server args", Label("e2e"), func() {
var virtualCluster *VirtualCluster
ctx := context.Background()
BeforeEach(func() {
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
// Add initial args for server
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=not_upgraded",
}
// Virtual mode with a single agent.
cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To(int32(1))
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
// Sanity-check the initial arg is present on the server pod.
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
Expect(isArgFound(&serverPod, "--node-label=test_server=not_upgraded")).To(BeTrue())
})
It("will update server args", func() {
// Retried as a whole: the Update can hit a conflict and the pod needs
// time to be recreated with the new args.
Eventually(func(g Gomega) {
var cluster v1beta1.Cluster
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
g.Expect(err).NotTo(HaveOccurred())
cluster.Spec.ServerArgs = []string{
"--node-label=test_server=upgraded",
}
err = k8sClient.Update(ctx, &cluster)
g.Expect(err).NotTo(HaveOccurred())
// server pods
sPods := listServerPods(ctx, virtualCluster)
g.Expect(len(sPods)).To(Equal(1))
g.Expect(isArgFound(&sPods[0], "--node-label=test_server=upgraded")).To(BeTrue())
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 2).
Should(Succeed())
})
})
// e2e (shared mode): bumping .spec.version upgrades the k3s server in place.
// Dynamic persistence is enabled so cluster state survives the server pod
// being replaced during the upgrade.
var _ = When("a shared mode cluster update its version", Label("e2e"), func() {
	var (
		virtualCluster *VirtualCluster
		nginxPod       *v1.Pod
	)

	BeforeEach(func() {
		ctx := context.Background()
		namespace := NewNamespace()
		cluster := NewCluster(namespace.Name)

		// Start from a known older version so the upgrade is observable.
		cluster.Spec.Version = "v1.31.13-k3s1"

		// need to enable persistence for this
		cluster.Spec.Persistence = v1beta1.PersistenceConfig{
			Type: v1beta1.DynamicPersistenceMode,
		}

		CreateCluster(cluster)
		client, restConfig := NewVirtualK8sClientAndConfig(cluster)
		virtualCluster = &VirtualCluster{
			Cluster:    cluster,
			RestConfig: restConfig,
			Client:     client,
		}

		// Sanity check: the single server pod runs the requested k3s image.
		sPods := listServerPods(ctx, virtualCluster)
		Expect(len(sPods)).To(Equal(1))
		serverPod := sPods[0]
		Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))

		// A workload that must be Ready again after the upgrade.
		nginxPod, _ = virtualCluster.NewNginxPod("")
	})

	It("will update server version when version spec is updated", func() {
		var cluster v1beta1.Cluster

		ctx := context.Background()

		err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
		Expect(err).NotTo(HaveOccurred())

		// update cluster version
		cluster.Spec.Version = "v1.32.8-k3s1"
		err = k8sClient.Update(ctx, &cluster)
		Expect(err).NotTo(HaveOccurred())

		Eventually(func(g Gomega) {
			// The server pod must be Ready and run the new image.
			serverPods := listServerPods(ctx, virtualCluster)
			g.Expect(len(serverPods)).To(Equal(1))
			serverPod := serverPods[0]
			condIndex, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
			g.Expect(condIndex).NotTo(Equal(-1))
			g.Expect(cond).NotTo(BeNil())
			g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
			g.Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))

			// The API server must report the new version ('-' becomes '+' in
			// the reported semver build metadata).
			clusterVersion, err := virtualCluster.Client.Discovery().ServerVersion()
			g.Expect(err).To(BeNil())
			g.Expect(clusterVersion.String()).To(Equal(strings.ReplaceAll(cluster.Spec.Version, "-", "+")))

			// Fix: refresh the nginx pod before checking readiness. The previous
			// code discarded the Get result and inspected the stale pre-upgrade
			// status captured in BeforeEach, which made the Ready assertion
			// vacuous. This now matches the virtual-mode version suite.
			nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
			g.Expect(err).To(BeNil())
			condIndex, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
			g.Expect(condIndex).NotTo(Equal(-1))
			g.Expect(cond).NotTo(BeNil())
			g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
		}).
			WithPolling(time.Second * 2).
			WithTimeout(time.Minute * 3).
			Should(Succeed())
	})
})
// e2e (virtual mode): bumping .spec.version upgrades both the server and the
// agent pods, and the pre-existing workload becomes Ready again afterwards.
var _ = When("a virtual mode cluster update its version", Label("e2e"), func() {
var (
virtualCluster *VirtualCluster
nginxPod *v1.Pod
)
BeforeEach(func() {
ctx := context.Background()
namespace := NewNamespace()
cluster := NewCluster(namespace.Name)
// Add initial version
cluster.Spec.Version = "v1.31.13-k3s1"
cluster.Spec.Mode = v1beta1.VirtualClusterMode
cluster.Spec.Agents = ptr.To(int32(1))
// need to enable persistence for this
cluster.Spec.Persistence = v1beta1.PersistenceConfig{
Type: v1beta1.DynamicPersistenceMode,
}
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
virtualCluster = &VirtualCluster{
Cluster: cluster,
RestConfig: restConfig,
Client: client,
}
// Sanity check: both server and agent start on the requested image.
sPods := listServerPods(ctx, virtualCluster)
Expect(len(sPods)).To(Equal(1))
serverPod := sPods[0]
Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
aPods := listAgentPods(ctx, virtualCluster)
Expect(len(aPods)).To(Equal(1))
agentPod := aPods[0]
Expect(agentPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
// Workload that must be Ready again after the upgrade.
nginxPod, _ = virtualCluster.NewNginxPod("")
})
It("will update server version when version spec is updated", func() {
var cluster v1beta1.Cluster
ctx := context.Background()
err := k8sClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(virtualCluster.Cluster), &cluster)
Expect(err).NotTo(HaveOccurred())
// update cluster version
cluster.Spec.Version = "v1.32.8-k3s1"
err = k8sClient.Update(ctx, &cluster)
Expect(err).NotTo(HaveOccurred())
Eventually(func(g Gomega) {
// server pods
serverPods := listServerPods(ctx, virtualCluster)
g.Expect(len(serverPods)).To(Equal(1))
serverPod := serverPods[0]
condIndex, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
g.Expect(condIndex).NotTo(Equal(-1))
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
g.Expect(serverPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
// agent pods
agentPods := listAgentPods(ctx, virtualCluster)
g.Expect(len(agentPods)).To(Equal(1))
agentPod := agentPods[0]
condIndex, cond = pod.GetPodCondition(&agentPod.Status, v1.PodReady)
g.Expect(condIndex).NotTo(Equal(-1))
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
g.Expect(agentPod.Spec.Containers[0].Image).To(Equal("rancher/k3s:" + cluster.Spec.Version))
// The reported version uses '+' instead of '-' for the build metadata.
clusterVersion, err := virtualCluster.Client.Discovery().ServerVersion()
g.Expect(err).To(BeNil())
g.Expect(clusterVersion.String()).To(Equal(strings.ReplaceAll(cluster.Spec.Version, "-", "+")))
// Refresh the nginx pod so the Ready check runs against current status.
nginxPod, err = virtualCluster.Client.CoreV1().Pods(nginxPod.Namespace).Get(ctx, nginxPod.Name, metav1.GetOptions{})
g.Expect(err).To(BeNil())
condIndex, cond = pod.GetPodCondition(&nginxPod.Status, v1.PodReady)
g.Expect(condIndex).NotTo(Equal(-1))
g.Expect(cond).NotTo(BeNil())
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
}).
WithPolling(time.Second * 2).
WithTimeout(time.Minute * 3).
Should(Succeed())
})
})

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"net/url"
"strings"
"sync"
"time"
@@ -20,7 +21,7 @@ import (
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/rancher/k3k/k3k-kubelet/translate"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
@@ -29,18 +30,27 @@ import (
)
type VirtualCluster struct {
Cluster *v1alpha1.Cluster
Cluster *v1beta1.Cluster
RestConfig *rest.Config
Client *kubernetes.Clientset
}
func NewVirtualCluster() *VirtualCluster {
func NewVirtualCluster() *VirtualCluster { // By default, create an ephemeral cluster
GinkgoHelper()
return NewVirtualClusterWithType(v1beta1.EphemeralPersistenceMode)
}
func NewVirtualClusterWithType(persistenceType v1beta1.PersistenceMode) *VirtualCluster {
GinkgoHelper()
namespace := NewNamespace()
By(fmt.Sprintf("Creating new virtual cluster in namespace %s", namespace.Name))
cluster := NewCluster(namespace.Name)
cluster.Spec.Persistence.Type = persistenceType
CreateCluster(cluster)
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
@@ -80,7 +90,7 @@ func NewVirtualClusters(n int) []*VirtualCluster {
func NewNamespace() *corev1.Namespace {
GinkgoHelper()
namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
namespace := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-", Labels: map[string]string{"e2e": "true"}}}
namespace, err := k8s.CoreV1().Namespaces().Create(context.Background(), namespace, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
@@ -116,67 +126,85 @@ func deleteNamespace(name string) {
Expect(err).To(Not(HaveOccurred()))
}
func NewCluster(namespace string) *v1alpha1.Cluster {
return &v1alpha1.Cluster{
func NewCluster(namespace string) *v1beta1.Cluster {
return &v1beta1.Cluster{
ObjectMeta: v1.ObjectMeta{
GenerateName: "cluster-",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Spec: v1beta1.ClusterSpec{
TLSSANs: []string{hostIP},
Expose: &v1alpha1.ExposeConfig{
NodePort: &v1alpha1.NodePortConfig{},
Expose: &v1beta1.ExposeConfig{
NodePort: &v1beta1.NodePortConfig{},
},
Persistence: v1alpha1.PersistenceConfig{
Type: v1alpha1.EphemeralPersistenceMode,
Persistence: v1beta1.PersistenceConfig{
Type: v1beta1.EphemeralPersistenceMode,
},
ServerArgs: []string{
"--disable-network-policy",
},
},
}
}
func CreateCluster(cluster *v1alpha1.Cluster) {
func CreateCluster(cluster *v1beta1.Cluster) {
GinkgoHelper()
ctx := context.Background()
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
By("Waiting for cluster to be ready")
// check that the server Pod and the Kubelet are in Ready state
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(cluster.Namespace).List(ctx, v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
serverRunning := false
kubeletRunning := false
// all the servers and agents needs to be in a running phase
var serversReady, agentsReady int
for _, pod := range podList.Items {
imageName := pod.Spec.Containers[0].Image
if strings.Contains(imageName, "rancher/k3s") {
serverRunning = pod.Status.Phase == corev1.PodRunning
} else if strings.Contains(imageName, "rancher/k3k-kubelet") {
kubeletRunning = pod.Status.Phase == corev1.PodRunning
if pod.Labels["role"] == "server" {
GinkgoLogr.Info(fmt.Sprintf("server pod=%s/%s status=%s", pod.Namespace, pod.Name, pod.Status.Phase))
if pod.Status.Phase == corev1.PodRunning {
serversReady++
}
}
if serverRunning && kubeletRunning {
return true
if pod.Labels["type"] == "agent" {
GinkgoLogr.Info(fmt.Sprintf("agent pod=%s/%s status=%s", pod.Namespace, pod.Name, pod.Status.Phase))
if pod.Status.Phase == corev1.PodRunning {
agentsReady++
}
}
}
return false
expectedServers := int(*cluster.Spec.Servers)
expectedAgents := int(*cluster.Spec.Agents)
By(fmt.Sprintf("serversReady=%d/%d agentsReady=%d/%d", serversReady, expectedServers, agentsReady, expectedAgents))
// the server pods should equal the expected servers, but since in shared mode we also have the kubelet is fine to have more than one
if (serversReady != expectedServers) || (agentsReady < expectedAgents) {
return false
}
return true
}).
WithTimeout(time.Minute * 2).
WithTimeout(time.Minute * 5).
WithPolling(time.Second * 5).
Should(BeTrue())
}
// NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
func NewVirtualK8sClient(cluster *v1alpha1.Cluster) *kubernetes.Clientset {
func NewVirtualK8sClient(cluster *v1beta1.Cluster) *kubernetes.Clientset {
virtualK8sClient, _ := NewVirtualK8sClientAndConfig(cluster)
return virtualK8sClient
}
// NewVirtualK8sClient returns a Kubernetes ClientSet for the virtual cluster
func NewVirtualK8sClientAndConfig(cluster *v1alpha1.Cluster) (*kubernetes.Clientset, *rest.Config) {
func NewVirtualK8sClientAndConfig(cluster *v1beta1.Cluster) (*kubernetes.Clientset, *rest.Config) {
GinkgoHelper()
var (
@@ -236,6 +264,10 @@ func (c *VirtualCluster) NewNginxPod(namespace string) (*corev1.Pod, string) {
var podIP string
// only check the pod on the host cluster if the mode is shared mode
if c.Cluster.Spec.Mode != v1beta1.SharedClusterMode {
return nginxPod, ""
}
// check that the nginx Pod is up and running in the host cluster
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(c.Cluster.Namespace).List(ctx, v1.ListOptions{})
@@ -299,3 +331,84 @@ func (c *VirtualCluster) ExecCmd(pod *corev1.Pod, command string) (string, strin
return stdout.String(), stderr.String(), err
}
// restartServerPod deletes the single server pod of the given virtual
// cluster and waits until a replacement server pod (one with no deletion
// timestamp) is listed again, i.e. the server has been rescheduled.
func restartServerPod(ctx context.Context, virtualCluster *VirtualCluster) {
	GinkgoHelper()

	labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server"
	serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
	Expect(err).To(Not(HaveOccurred()))
	Expect(len(serverPods.Items)).To(Equal(1))

	serverPod := serverPods.Items[0]

	GinkgoWriter.Printf("deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name)

	err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{})
	Expect(err).To(Not(HaveOccurred()))

	By("Deleting server pod")

	// Wait for the replacement server pod. Use the Gomega instance passed to
	// the polled function so that transient states — e.g. two pods listed
	// while the old one is still terminating, or the old pod still carrying
	// its DeletionTimestamp — cause a retry instead of failing the spec
	// outright, which is what a bare Expect inside Eventually would do.
	Eventually(func(g Gomega) {
		serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector})
		g.Expect(err).To(Not(HaveOccurred()))
		g.Expect(serverPods.Items).To(HaveLen(1))
		g.Expect(serverPods.Items[0].DeletionTimestamp).To(BeNil())
	}).WithTimeout(60 * time.Second).WithPolling(time.Second * 5).Should(Succeed())
}
// listServerPods returns the host-cluster pods acting as servers for the
// given virtual cluster, selected by the cluster name and role labels.
func listServerPods(ctx context.Context, virtualCluster *VirtualCluster) []corev1.Pod {
	selector := fmt.Sprintf("cluster=%s,role=server", virtualCluster.Cluster.Name)

	pods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: selector})
	Expect(err).To(Not(HaveOccurred()))

	return pods.Items
}
// listAgentPods returns the host-cluster agent pods for the given virtual
// cluster, selected by cluster name, agent type, and the cluster's mode.
func listAgentPods(ctx context.Context, virtualCluster *VirtualCluster) []corev1.Pod {
	c := virtualCluster.Cluster
	selector := fmt.Sprintf("cluster=%s,type=agent,mode=%s", c.Name, c.Spec.Mode)

	pods, err := k8s.CoreV1().Pods(c.Namespace).List(ctx, v1.ListOptions{LabelSelector: selector})
	Expect(err).To(Not(HaveOccurred()))

	return pods.Items
}
// getEnv looks up envName among the env vars of the pod's first container.
// It returns the variable's value and true when found, or "" and false
// otherwise.
func getEnv(pod *corev1.Pod, envName string) (string, bool) {
	for _, env := range pod.Spec.Containers[0].Env {
		if env.Name != envName {
			continue
		}

		return env.Value, true
	}

	return "", false
}
// isArgFound reports whether arg appears as a substring of any entry in the
// command of the pod's first container.
func isArgFound(pod *corev1.Pod, arg string) bool {
	command := pod.Spec.Containers[0].Command
	for i := range command {
		if strings.Contains(command[i], arg) {
			return true
		}
	}

	return false
}
// getServerIP returns the address of the host cluster's API server. When
// the suite started K3s inside Docker (k3sContainer is set), the container
// IP is used; otherwise the host is extracted from the rest.Config URL.
func getServerIP(ctx context.Context, cfg *rest.Config) (string, error) {
	// Prefer the container IP when running against testcontainers-managed K3s.
	if k3sContainer != nil {
		return k3sContainer.ContainerIP(ctx)
	}

	parsed, err := url.Parse(cfg.Host)
	if err != nil {
		return "", err
	}

	// Hostname() strips any port component from the URL's host.
	return parsed.Hostname(), nil
}

View File

@@ -0,0 +1,35 @@
package k3k_test
import (
"context"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Smoke test for the suite setup: after the Helm install performed in
// BeforeSuite, at least one k3k controller pod (selected by the
// app.kubernetes.io/name=k3k label) must reach the Running phase in the
// k3k namespace within 10 seconds.
var _ = When("k3k is installed", Label("e2e"), func() {
	It("is in Running status", func() {
		// check that the controller is running
		Eventually(func() bool {
			opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"}
			podList, err := k8s.CoreV1().Pods(k3kNamespace).List(context.Background(), opts)
			Expect(err).To(Not(HaveOccurred()))
			// the deployment must have produced at least one pod by now
			Expect(podList.Items).To(Not(BeEmpty()))

			// a single Running controller pod is enough to pass
			for _, pod := range podList.Items {
				if pod.Status.Phase == corev1.PodRunning {
					return true
				}
			}

			return false
		}).
			WithTimeout(time.Second * 10).
			WithPolling(time.Second).
			Should(BeTrue())
	})
})

View File

@@ -9,6 +9,7 @@ import (
"os"
"os/exec"
"path"
"strings"
"testing"
"time"
@@ -31,7 +32,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -48,56 +49,57 @@ func TestTests(t *testing.T) {
}
var (
k3sContainer *k3s.K3sContainer
hostIP string
restcfg *rest.Config
k8s *kubernetes.Clientset
k8sClient client.Client
kubeconfigPath string
k3sContainer *k3s.K3sContainer
hostIP string
restcfg *rest.Config
k8s *kubernetes.Clientset
k8sClient client.Client
kubeconfigPath string
repo string
helmActionConfig *action.Configuration
)
var _ = BeforeSuite(func() {
var err error
ctx := context.Background()
GinkgoWriter.Println("GOCOVERDIR:", os.Getenv("GOCOVERDIR"))
k3sContainer, err = k3s.Run(ctx, "rancher/k3s:v1.32.1-k3s1")
Expect(err).To(Not(HaveOccurred()))
repo = os.Getenv("REPO")
if repo == "" {
repo = "rancher"
}
hostIP, err = k3sContainer.ContainerIP(ctx)
Expect(err).To(Not(HaveOccurred()))
_, dockerInstallEnabled := os.LookupEnv("K3K_DOCKER_INSTALL")
GinkgoWriter.Println("K3s containerIP: " + hostIP)
kubeconfig, err := k3sContainer.GetKubeConfig(context.Background())
Expect(err).To(Not(HaveOccurred()))
tmpFile, err := os.CreateTemp("", "kubeconfig-")
Expect(err).To(Not(HaveOccurred()))
_, err = tmpFile.Write(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
Expect(tmpFile.Close()).To(Succeed())
kubeconfigPath = tmpFile.Name()
Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())
DeferCleanup(os.Remove, kubeconfigPath)
initKubernetesClient(kubeconfig)
installK3kChart(ctx, kubeconfig)
if dockerInstallEnabled {
installK3SDocker(ctx)
initKubernetesClient(ctx)
installK3kChart()
} else {
initKubernetesClient(ctx)
}
patchPVC(ctx, k8s)
})
func initKubernetesClient(kubeconfig []byte) {
var err error
func initKubernetesClient(ctx context.Context) {
var (
err error
kubeconfig []byte
)
kubeconfigPath := os.Getenv("KUBECONFIG")
Expect(kubeconfigPath).To(Not(BeEmpty()))
kubeconfig, err = os.ReadFile(kubeconfigPath)
Expect(err).To(Not(HaveOccurred()))
restcfg, err = clientcmd.RESTConfigFromKubeConfig(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
hostIP, err = getServerIP(ctx, restcfg)
Expect(err).To(Not(HaveOccurred()))
k8s, err = kubernetes.NewForConfig(restcfg)
Expect(err).To(Not(HaveOccurred()))
@@ -107,6 +109,7 @@ func initKubernetesClient(kubeconfig []byte) {
logger, err := zap.NewDevelopment()
Expect(err).NotTo(HaveOccurred())
log.SetLogger(zapr.NewLogger(logger))
}
@@ -115,30 +118,74 @@ func buildScheme() *runtime.Scheme {
err := corev1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
err = v1alpha1.AddToScheme(scheme)
err = v1beta1.AddToScheme(scheme)
Expect(err).NotTo(HaveOccurred())
return scheme
}
func installK3kChart(ctx context.Context, kubeconfig []byte) {
// installK3SDocker starts a K3s host cluster in Docker via testcontainers,
// loads the locally built k3k images into it, writes the cluster's
// kubeconfig to a temp file, and points the KUBECONFIG env var at it so the
// rest of the suite (and helm) targets this cluster.
func installK3SDocker(ctx context.Context) {
	var (
		err        error
		kubeconfig []byte
	)

	// K3S_HOST_VERSION selects the host K3s image; defaults to v1.32.1+k3s1.
	k3sHostVersion := os.Getenv("K3S_HOST_VERSION")
	if k3sHostVersion == "" {
		k3sHostVersion = "v1.32.1+k3s1"
	}

	// Docker image tags cannot contain '+'; K3s tags use '-' instead.
	k3sHostVersion = strings.ReplaceAll(k3sHostVersion, "+", "-")

	k3sContainer, err = k3s.Run(ctx, "rancher/k3s:"+k3sHostVersion)
	Expect(err).To(Not(HaveOccurred()))

	containerIP, err := k3sContainer.ContainerIP(ctx)
	Expect(err).To(Not(HaveOccurred()))
	GinkgoWriter.Println("K3s containerIP: " + containerIP)

	kubeconfig, err = k3sContainer.GetKubeConfig(context.Background())
	Expect(err).To(Not(HaveOccurred()))

	// Persist the kubeconfig so external tooling (helm, kubectl) can use it.
	tmpFile, err := os.CreateTemp("", "kubeconfig-")
	Expect(err).To(Not(HaveOccurred()))

	_, err = tmpFile.Write(kubeconfig)
	Expect(err).To(Not(HaveOccurred()))
	Expect(tmpFile.Close()).To(Succeed())

	kubeconfigPath = tmpFile.Name()

	// Preload the dev-tagged controller and kubelet images built by CI
	// (repo comes from the REPO env var, default "rancher").
	err = k3sContainer.LoadImages(ctx, repo+"/k3k:dev", repo+"/k3k-kubelet:dev")
	Expect(err).To(Not(HaveOccurred()))

	DeferCleanup(os.Remove, kubeconfigPath)
	Expect(os.Setenv("KUBECONFIG", kubeconfigPath)).To(Succeed())

	GinkgoWriter.Print(kubeconfigPath)
	GinkgoWriter.Print(string(kubeconfig))
}
func installK3kChart() {
pwd, err := os.Getwd()
Expect(err).To(Not(HaveOccurred()))
k3kChart, err := loader.Load(path.Join(pwd, "../charts/k3k"))
Expect(err).To(Not(HaveOccurred()))
actionConfig := new(action.Configuration)
helmActionConfig = new(action.Configuration)
kubeconfig, err := os.ReadFile(kubeconfigPath)
Expect(err).To(Not(HaveOccurred()))
restClientGetter, err := NewRESTClientGetter(kubeconfig)
Expect(err).To(Not(HaveOccurred()))
err = actionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
err = helmActionConfig.Init(restClientGetter, k3kNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...any) {
GinkgoWriter.Printf("helm debug: "+format+"\n", v...)
})
Expect(err).To(Not(HaveOccurred()))
iCli := action.NewInstall(actionConfig)
iCli := action.NewInstall(helmActionConfig)
iCli.ReleaseName = k3kName
iCli.Namespace = k3kNamespace
iCli.CreateNamespace = true
@@ -148,7 +195,7 @@ func installK3kChart(ctx context.Context, kubeconfig []byte) {
controllerMap, _ := k3kChart.Values["controller"].(map[string]any)
imageMap, _ := controllerMap["image"].(map[string]any)
maps.Copy(imageMap, map[string]any{
"repository": "rancher/k3k",
"repository": repo + "/k3k",
"tag": "dev",
"pullPolicy": "IfNotPresent",
})
@@ -157,13 +204,10 @@ func installK3kChart(ctx context.Context, kubeconfig []byte) {
sharedAgentMap, _ := agentMap["shared"].(map[string]any)
sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any)
maps.Copy(sharedAgentImageMap, map[string]any{
"repository": "rancher/k3k-kubelet",
"repository": repo + "/k3k-kubelet",
"tag": "dev",
})
err = k3sContainer.LoadImages(ctx, "rancher/k3k:dev", "rancher/k3k-kubelet:dev")
Expect(err).To(Not(HaveOccurred()))
release, err := iCli.Run(k3kChart, k3kChart.Values)
Expect(err).To(Not(HaveOccurred()))
@@ -275,21 +319,22 @@ var _ = AfterSuite(func() {
goCoverDir := os.Getenv("GOCOVERDIR")
if goCoverDir == "" {
goCoverDir = path.Join(os.TempDir(), "covdata")
Expect(os.Mkdir(goCoverDir, 0o755)).To(Succeed())
Expect(os.MkdirAll(goCoverDir, 0o755)).To(Succeed())
}
dumpK3kCoverageData(ctx, goCoverDir)
if k3sContainer != nil {
// dump k3s logs
k3sLogs, err := k3sContainer.Logs(ctx)
Expect(err).To(Not(HaveOccurred()))
writeLogs("k3s.log", k3sLogs)
// dump k3s logs
k3sLogs, err := k3sContainer.Logs(ctx)
Expect(err).To(Not(HaveOccurred()))
writeLogs("k3s.log", k3sLogs)
// dump k3k controller logs
k3kLogs := getK3kLogs(ctx)
writeLogs("k3k.log", k3kLogs)
// dump k3k controller logs
k3kLogs := getK3kLogs(ctx)
writeLogs("k3k.log", k3kLogs)
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
testcontainers.CleanupContainer(GinkgoTB(), k3sContainer)
}
})
// dumpK3kCoverageData will kill the K3k controller container to force it to dump the coverage data.