Compare commits

...

11 Commits

Author SHA1 Message Date
Hussein Galal
8b0383f35e Fix chart release action (#210)
* Fix chart release action

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix chart release action

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-01-23 21:02:34 +02:00
Enrico Candino
9e52c375a0 bump urfave/cli to v2 (#205) 2025-01-23 10:14:01 +01:00
Hussein Galal
ca8f30fd9e upgrade chart (#207)
Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-01-23 02:30:12 +02:00
Hussein Galal
931c7c5fcb Fix secret tokens and DNS translation (#200)
* Include init containers in token translation

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Fix kubernetes.defaul service DNS translation

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add skip test var to dapper

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* Add kubelet version and image pull policy to the shared agent

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* fixes

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
2025-01-23 01:55:05 +02:00
Enrico Candino
fd6ed8184f removed antiaffinity (#199) 2025-01-22 18:34:30 +01:00
Enrico Candino
c285004944 fix release tag (#201) 2025-01-22 15:18:10 +01:00
Enrico Candino
b0aa22b2f4 Simplify Cluster spec (#193)
* removed some required parameters, adding defaults

* add hostVersion in Status field

* fix tests
2025-01-21 21:19:44 +01:00
Enrico Candino
3f49593f96 Add Cluster creation test (#192)
* added k3kcli to path

* test create cluster

* updated ptr

* added cluster creation test
2025-01-21 17:53:42 +01:00
Enrico Candino
0b3a5f250e Added golangci-lint action (#197)
* added golangci-lint action

* linters

* cleanup linters

* fix error, increase timeout

* removed unnecessary call to Stringer
2025-01-21 11:30:57 +01:00
Enrico Candino
e7671134d2 fixed missing version (#196) 2025-01-21 10:52:27 +01:00
Enrico Candino
f9b3d62413 bump go1.23 (#198) 2025-01-21 10:50:23 +01:00
44 changed files with 702 additions and 238 deletions

View File

@@ -4,7 +4,7 @@ on:
- "chart-*"
env:
GH_TOKEN: ${{ github.token }}
GITHUB_TOKEN: ${{ github.token }}
name: Chart
permissions:
@@ -28,4 +28,3 @@ jobs:
- name: Index Chart
run: |
make index-chart

View File

@@ -63,7 +63,7 @@ jobs:
- name: Check release tag
id: release-tag
run: |
CURRENT_TAG=$(git describe --tag --always)
CURRENT_TAG=$(git describe --tag --always --match="v[0-9]*")
if git show-ref --tags ${CURRENT_TAG} --quiet; then
echo "tag ${CURRENT_TAG} already exists";

View File

@@ -9,6 +9,23 @@ permissions:
contents: read
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
args: --timeout=5m
version: v1.60
tests:
runs-on: ubuntu-latest
@@ -30,9 +47,7 @@ jobs:
- name: Install tools
run: |
go install github.com/onsi/ginkgo/v2/ginkgo
# With Golang 1.22 we need to use the release-0.18 branch
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@release-0.18
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
ENVTEST_BIN=$(setup-envtest use -p path)
sudo mkdir -p /usr/local/kubebuilder/bin
@@ -66,7 +81,14 @@ jobs:
run: go install github.com/onsi/ginkgo/v2/ginkgo
- name: Build
run: ./scripts/build
run: |
./scripts/build
# add k3kcli to $PATH
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Check k3kcli
run: k3kcli -v
- name: Run tests
run: ginkgo -v ./tests

9
.golangci.yml Normal file
View File

@@ -0,0 +1,9 @@
linters:
enable:
# default linters
- errcheck
- gosimple
- govet
- ineffassign
- staticcheck
- unused

View File

@@ -20,6 +20,9 @@ builds:
- "amd64"
- "arm64"
- "s390x"
ldflags:
- -w -s # strip debug info and symbol table
- -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}"
- id: k3k-kubelet
main: ./k3k-kubelet
@@ -32,7 +35,10 @@ builds:
- "amd64"
- "arm64"
- "s390x"
ldflags:
- -w -s # strip debug info and symbol table
- -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}"
- id: k3kcli
main: ./cli
binary: k3kcli
@@ -41,6 +47,9 @@ builds:
goarch:
- "amd64"
- "arm64"
ldflags:
- -w -s # strip debug info and symbol table
- -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}"
archives:
- format: binary

View File

@@ -1,4 +1,4 @@
ARG GOLANG=rancher/hardened-build-base:v1.22.2b1
ARG GOLANG=rancher/hardened-build-base:v1.23.4b1
FROM ${GOLANG}
ARG DAPPER_HOST_ARCH
@@ -17,15 +17,13 @@ ENV CONTROLLER_GEN_VERSION v0.14.0
RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_GEN_VERSION}
# Tool to setup the envtest framework to run the controllers integration tests
# Note: With Golang 1.22 we need to use the release-0.18 branch
ENV SETUP_ENVTEST_VERSION release-0.18
RUN go install sigs.k8s.io/controller-runtime/tools/setup-envtest@${SETUP_ENVTEST_VERSION} && \
RUN go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest && \
ENVTEST_BIN=$(setup-envtest use -p path) && \
mkdir -p /usr/local/kubebuilder/bin && \
cp $ENVTEST_BIN/* /usr/local/kubebuilder/bin
ENV GO111MODULE on
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS GITHUB_TOKEN SKIP_TESTS
ENV DAPPER_SOURCE /go/src/github.com/rancher/k3k/
ENV DAPPER_OUTPUT ./bin ./dist ./deploy ./charts
ENV DAPPER_DOCKER_SOCKET true

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.1.5-r5
appVersion: v0.2.2-rc2
version: 0.1.5-r1
appVersion: v0.2.2-rc4

View File

@@ -36,6 +36,7 @@ spec:
metadata:
type: object
spec:
default: {}
properties:
addons:
description: Addons is a list of secrets containing raw YAML which
@@ -55,6 +56,7 @@ spec:
type: string
type: array
agents:
default: 0
description: Agents is the number of K3s pods to run in agent (worker)
mode.
format: int32
@@ -179,6 +181,7 @@ spec:
type: string
type: array
servers:
default: 1
description: Servers is the number of K3s pods to run in server (controlplane)
mode.
format: int32
@@ -218,11 +221,6 @@ spec:
description: Version is a string representing the Kubernetes version
to be used by the virtual nodes.
type: string
required:
- agents
- mode
- servers
- version
type: object
status:
properties:
@@ -230,6 +228,8 @@ spec:
type: string
clusterDNS:
type: string
hostVersion:
type: string
persistence:
properties:
storageClassName:
@@ -250,8 +250,6 @@ spec:
type: string
type: array
type: object
required:
- spec
type: object
served: true
storage: true

View File

@@ -24,6 +24,8 @@ spec:
value: {{ .Values.host.clusterCIDR }}
- name: SHARED_AGENT_IMAGE
value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}"
- name: SHARED_AGENT_PULL_POLICY
value: {{ .Values.sharedAgent.image.pullPolicy }}
ports:
- containerPort: 8080
name: https

View File

@@ -26,3 +26,4 @@ sharedAgent:
image:
repository: "rancher/k3k-kubelet"
tag: ""
pullPolicy: ""

View File

@@ -2,30 +2,26 @@ package cluster
import (
"github.com/rancher/k3k/cli/cmds"
"github.com/urfave/cli"
"github.com/urfave/cli/v2"
)
var subcommands = []cli.Command{
var subcommands = []*cli.Command{
{
Name: "create",
Usage: "Create new cluster",
SkipFlagParsing: false,
SkipArgReorder: true,
Action: create,
Flags: append(cmds.CommonFlags, clusterCreateFlags...),
Name: "create",
Usage: "Create new cluster",
Action: create,
Flags: append(cmds.CommonFlags, clusterCreateFlags...),
},
{
Name: "delete",
Usage: "Delete an existing cluster",
SkipFlagParsing: false,
SkipArgReorder: true,
Action: delete,
Flags: append(cmds.CommonFlags, clusterDeleteFlags...),
Name: "delete",
Usage: "Delete an existing cluster",
Action: delete,
Flags: append(cmds.CommonFlags, clusterDeleteFlags...),
},
}
func NewCommand() cli.Command {
return cli.Command{
func NewCommand() *cli.Command {
return &cli.Command{
Name: "cluster",
Usage: "cluster command",
Subcommands: subcommands,

View File

@@ -16,7 +16,7 @@ import (
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v2"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -51,65 +51,65 @@ var (
mode string
clusterCreateFlags = []cli.Flag{
cli.StringFlag{
&cli.StringFlag{
Name: "name",
Usage: "name of the cluster",
Destination: &name,
},
cli.Int64Flag{
&cli.Int64Flag{
Name: "servers",
Usage: "number of servers",
Destination: &servers,
Value: 1,
},
cli.Int64Flag{
&cli.Int64Flag{
Name: "agents",
Usage: "number of agents",
Destination: &agents,
},
cli.StringFlag{
&cli.StringFlag{
Name: "token",
Usage: "token of the cluster",
Destination: &token,
},
cli.StringFlag{
&cli.StringFlag{
Name: "cluster-cidr",
Usage: "cluster CIDR",
Destination: &clusterCIDR,
},
cli.StringFlag{
&cli.StringFlag{
Name: "service-cidr",
Usage: "service CIDR",
Destination: &serviceCIDR,
},
cli.StringFlag{
&cli.StringFlag{
Name: "persistence-type",
Usage: "Persistence mode for the nodes (ephermal, static, dynamic)",
Value: server.EphermalNodesType,
Destination: &persistenceType,
},
cli.StringFlag{
&cli.StringFlag{
Name: "storage-class-name",
Usage: "Storage class name for dynamic persistence type",
Destination: &storageClassName,
},
cli.StringSliceFlag{
&cli.StringSliceFlag{
Name: "server-args",
Usage: "servers extra arguments",
Value: &serverArgs,
},
cli.StringSliceFlag{
&cli.StringSliceFlag{
Name: "agent-args",
Usage: "agents extra arguments",
Value: &agentArgs,
},
cli.StringFlag{
&cli.StringFlag{
Name: "version",
Usage: "k3s version",
Destination: &version,
Value: "v1.26.1-k3s1",
},
cli.StringFlag{
&cli.StringFlag{
Name: "mode",
Usage: "k3k mode type",
Destination: &mode,
@@ -153,8 +153,8 @@ func create(clx *cli.Context) error {
int32(agents),
clusterCIDR,
serviceCIDR,
serverArgs,
agentArgs,
serverArgs.Value(),
agentArgs.Value(),
)
cluster.Spec.Expose = &v1alpha1.ExposeConfig{

View File

@@ -6,7 +6,7 @@ import (
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -14,7 +14,7 @@ import (
var (
clusterDeleteFlags = []cli.Flag{
cli.StringFlag{
&cli.StringFlag{
Name: "name",
Usage: "name of the cluster",
Destination: &name,

View File

@@ -14,7 +14,7 @@ import (
"github.com/rancher/k3k/pkg/controller/certs"
"github.com/rancher/k3k/pkg/controller/kubeconfig"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@@ -39,33 +39,33 @@ var (
expirationDays int64
configName string
generateKubeconfigFlags = []cli.Flag{
cli.StringFlag{
&cli.StringFlag{
Name: "name",
Usage: "cluster name",
Destination: &name,
},
cli.StringFlag{
&cli.StringFlag{
Name: "config-name",
Usage: "the name of the generated kubeconfig file",
Destination: &configName,
},
cli.StringFlag{
&cli.StringFlag{
Name: "cn",
Usage: "Common name (CN) of the generated certificates for the kubeconfig",
Destination: &cn,
Value: controller.AdminCommonName,
},
cli.StringSliceFlag{
&cli.StringSliceFlag{
Name: "org",
Usage: "Organization name (ORG) of the generated certificates for the kubeconfig",
Value: &org,
},
cli.StringSliceFlag{
&cli.StringSliceFlag{
Name: "altNames",
Usage: "altNames of the generated certificates for the kubeconfig",
Value: &altNames,
},
cli.Int64Flag{
&cli.Int64Flag{
Name: "expiration-days",
Usage: "Expiration date of the certificates used for the kubeconfig",
Destination: &expirationDays,
@@ -74,19 +74,18 @@ var (
}
)
var subcommands = []cli.Command{
var subcommands = []*cli.Command{
{
Name: "generate",
Usage: "Generate kubeconfig for clusters",
SkipFlagParsing: false,
SkipArgReorder: true,
Action: generate,
Flags: append(cmds.CommonFlags, generateKubeconfigFlags...),
},
}
func NewCommand() cli.Command {
return cli.Command{
func NewCommand() *cli.Command {
return &cli.Command{
Name: "kubeconfig",
Usage: "Manage kubeconfig for clusters",
Subcommands: subcommands,
@@ -123,13 +122,15 @@ func generate(clx *cli.Context) error {
}
host := strings.Split(url.Host, ":")
certAltNames := certs.AddSANs(altNames)
if org == nil {
org = cli.StringSlice{user.SystemPrivilegedGroup}
certAltNames := certs.AddSANs(altNames.Value())
orgs := org.Value()
if orgs == nil {
orgs = []string{user.SystemPrivilegedGroup}
}
cfg := kubeconfig.KubeConfig{
CN: cn,
ORG: org,
ORG: orgs,
ExpiryDate: time.Hour * 24 * time.Duration(expirationDays),
AltNames: certAltNames,
}

View File

@@ -2,7 +2,7 @@ package cmds
import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v2"
)
const (
@@ -14,13 +14,13 @@ var (
Kubeconfig string
namespace string
CommonFlags = []cli.Flag{
cli.StringFlag{
&cli.StringFlag{
Name: "kubeconfig",
EnvVar: "KUBECONFIG",
EnvVars: []string{"KUBECONFIG"},
Usage: "Kubeconfig path",
Destination: &Kubeconfig,
},
cli.StringFlag{
&cli.StringFlag{
Name: "namespace",
Usage: "Namespace to create the k3k cluster in",
Destination: &namespace,
@@ -33,11 +33,11 @@ func NewApp() *cli.App {
app.Name = "k3kcli"
app.Usage = "CLI for K3K"
app.Flags = []cli.Flag{
cli.BoolFlag{
&cli.BoolFlag{
Name: "debug",
Usage: "Turn on debug logs",
Destination: &debug,
EnvVar: "K3K_DEBUG",
EnvVars: []string{"K3K_DEBUG"},
},
}

View File

@@ -1,28 +1,28 @@
package main
import (
"fmt"
"os"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/cli/cmds/cluster"
"github.com/rancher/k3k/cli/cmds/kubeconfig"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
const (
program = "k3kcli"
version = "dev"
gitCommit = "HEAD"
"github.com/urfave/cli/v2"
)
func main() {
app := cmds.NewApp()
app.Commands = []cli.Command{
app.Version = buildinfo.Version
cli.VersionPrinter = func(cCtx *cli.Context) {
fmt.Println("k3kcli Version: " + buildinfo.Version)
}
app.Commands = []*cli.Command{
cluster.NewCommand(),
kubeconfig.NewCommand(),
}
app.Version = version + " (" + gitCommit + ")"
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)

7
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/rancher/k3k
go 1.22.7
go 1.23.4
replace (
github.com/google/cel-go => github.com/google/cel-go v0.17.7
@@ -19,7 +19,7 @@ require (
github.com/sirupsen/logrus v1.9.3
github.com/testcontainers/testcontainers-go v0.35.0
github.com/testcontainers/testcontainers-go/modules/k3s v0.35.0
github.com/urfave/cli v1.22.12
github.com/urfave/cli/v2 v2.27.5
github.com/virtual-kubelet/virtual-kubelet v1.11.0
go.etcd.io/etcd/api/v3 v3.5.14
go.etcd.io/etcd/client/v3 v3.5.14
@@ -61,7 +61,7 @@ require (
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/dockercfg v0.3.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
@@ -168,6 +168,7 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
go.opencensus.io v0.24.0 // indirect

11
go.sum
View File

@@ -606,7 +606,6 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -712,9 +711,9 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -1209,8 +1208,8 @@ github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+F
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8=
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/virtual-kubelet/virtual-kubelet v1.11.0 h1:LOMcZQfP083xmYH9mYtyHAR+ybFbK1uMaRA+EtDcd1I=
github.com/virtual-kubelet/virtual-kubelet v1.11.0/go.mod h1:WQfPHbIlzfhMNYkh6hFXF1ctGfNM8UJCYLYpLa/trxc=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -1225,6 +1224,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=

View File

@@ -18,6 +18,7 @@ type config struct {
VirtualConfigPath string `yaml:"virtualConfigPath,omitempty"`
KubeletPort string `yaml:"kubeletPort,omitempty"`
ServerIP string `yaml:"serverIP,omitempty"`
Version string `yaml:"version,omitempty"`
}
func (c *config) unmarshalYAML(data []byte) error {
@@ -54,6 +55,9 @@ func (c *config) unmarshalYAML(data []byte) error {
if c.ServerIP == "" {
c.ServerIP = conf.ServerIP
}
if c.Version == "" {
c.Version = conf.Version
}
return nil
}

View File

@@ -66,6 +66,10 @@ func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object)
// note that this doesn't do any type safety - fix this
// when generics work
c.Translater.TranslateTo(s)
// Remove service-account-token types when synced to the host
if s.Type == v1.SecretTypeServiceAccountToken {
s.Type = v1.SecretTypeOpaque
}
return s, nil
},
Logger: c.Logger,
@@ -109,7 +113,7 @@ func (c *ControllerHandler) RemoveResource(ctx context.Context, obj client.Objec
ctrl, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]
c.RUnlock()
if !ok {
return fmt.Errorf("no controller found for gvk" + obj.GetObjectKind().GroupVersionKind().String())
return fmt.Errorf("no controller found for gvk %s", obj.GetObjectKind().GroupVersionKind())
}
return ctrl.RemoveResource(ctx, obj.GetNamespace(), obj.GetName())
}

View File

@@ -187,8 +187,8 @@ func clusterIP(ctx context.Context, serviceName, clusterNamespace string, hostCl
return service.Spec.ClusterIP, nil
}
func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace, name, hostname, serverIP, dnsIP string) error {
providerFunc := k.newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP)
func (k *kubelet) registerNode(ctx context.Context, agentIP, srvPort, namespace, name, hostname, serverIP, dnsIP, version string) error {
providerFunc := k.newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version)
nodeOpts := k.nodeOpts(ctx, srvPort, namespace, name, hostname, agentIP)
var err error
@@ -235,14 +235,14 @@ func (k *kubelet) start(ctx context.Context) {
k.logger.Info("node exited successfully")
}
func (k *kubelet) newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP string) nodeutil.NewProviderFunc {
func (k *kubelet) newProviderFunc(namespace, name, hostname, agentIP, serverIP, dnsIP, version string) nodeutil.NewProviderFunc {
return func(pc nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) {
utilProvider, err := provider.New(*k.hostConfig, k.hostMgr, k.virtualMgr, k.logger, namespace, name, serverIP, dnsIP)
if err != nil {
return nil, nil, errors.New("unable to make nodeutil provider: " + err.Error())
}
provider.ConfigureNode(k.logger, pc.Node, hostname, k.port, agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster)
provider.ConfigureNode(k.logger, pc.Node, hostname, k.port, agentIP, utilProvider.CoreClient, utilProvider.VirtualClient, k.virtualCluster, version)
return utilProvider, &provider.Node{}, nil
}

View File

@@ -7,7 +7,7 @@ import (
"github.com/go-logr/zapr"
"github.com/rancher/k3k/pkg/log"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v2"
"go.uber.org/zap"
ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
)
@@ -24,67 +24,73 @@ func main() {
app.Name = "k3k-kubelet"
app.Usage = "virtual kubelet implementation k3k"
app.Flags = []cli.Flag{
cli.StringFlag{
&cli.StringFlag{
Name: "cluster-name",
Usage: "Name of the k3k cluster",
Destination: &cfg.ClusterName,
EnvVar: "CLUSTER_NAME",
EnvVars: []string{"CLUSTER_NAME"},
},
cli.StringFlag{
&cli.StringFlag{
Name: "cluster-namespace",
Usage: "Namespace of the k3k cluster",
Destination: &cfg.ClusterNamespace,
EnvVar: "CLUSTER_NAMESPACE",
EnvVars: []string{"CLUSTER_NAMESPACE"},
},
cli.StringFlag{
&cli.StringFlag{
Name: "cluster-token",
Usage: "K3S token of the k3k cluster",
Destination: &cfg.Token,
EnvVar: "CLUSTER_TOKEN",
EnvVars: []string{"CLUSTER_TOKEN"},
},
cli.StringFlag{
&cli.StringFlag{
Name: "host-config-path",
Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config",
Destination: &cfg.HostConfigPath,
EnvVar: "HOST_KUBECONFIG",
EnvVars: []string{"HOST_KUBECONFIG"},
},
cli.StringFlag{
&cli.StringFlag{
Name: "virtual-config-path",
Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster",
Destination: &cfg.VirtualConfigPath,
EnvVar: "CLUSTER_NAME",
EnvVars: []string{"CLUSTER_NAME"},
},
cli.StringFlag{
&cli.StringFlag{
Name: "kubelet-port",
Usage: "kubelet API port number",
Destination: &cfg.KubeletPort,
EnvVar: "SERVER_PORT",
EnvVars: []string{"SERVER_PORT"},
Value: "10250",
},
cli.StringFlag{
&cli.StringFlag{
Name: "agent-hostname",
Usage: "Agent Hostname used for TLS SAN for the kubelet server",
Destination: &cfg.AgentHostname,
EnvVar: "AGENT_HOSTNAME",
EnvVars: []string{"AGENT_HOSTNAME"},
},
cli.StringFlag{
&cli.StringFlag{
Name: "server-ip",
Usage: "Server IP used for registering the virtual kubelet to the cluster",
Destination: &cfg.ServerIP,
EnvVar: "SERVER_IP",
EnvVars: []string{"SERVER_IP"},
},
cli.StringFlag{
&cli.StringFlag{
Name: "version",
Usage: "Version of kubernetes server",
Destination: &cfg.Version,
EnvVars: []string{"VERSION"},
},
&cli.StringFlag{
Name: "config",
Usage: "Path to k3k-kubelet config file",
Destination: &configFile,
EnvVar: "CONFIG_FILE",
EnvVars: []string{"CONFIG_FILE"},
Value: "/etc/rancher/k3k/config.yaml",
},
cli.BoolFlag{
&cli.BoolFlag{
Name: "debug",
Usage: "Enable debug logging",
Destination: &debug,
EnvVar: "DEBUG",
EnvVars: []string{"DEBUG"},
},
}
app.Before = func(clx *cli.Context) error {
@@ -98,7 +104,7 @@ func main() {
}
}
func run(clx *cli.Context) {
func run(clx *cli.Context) error {
ctx := context.Background()
if err := cfg.parse(configFile); err != nil {
logger.Fatalw("failed to parse config file", "path", configFile, zap.Error(err))
@@ -112,9 +118,11 @@ func run(clx *cli.Context) {
logger.Fatalw("failed to create new virtual kubelet instance", zap.Error(err))
}
if err := k.registerNode(ctx, k.agentIP, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, cfg.ServerIP, k.dnsIP); err != nil {
if err := k.registerNode(ctx, k.agentIP, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, cfg.ServerIP, k.dnsIP, cfg.Version); err != nil {
logger.Fatalw("failed to register new node", zap.Error(err))
}
k.start(ctx)
return nil
}

View File

@@ -15,7 +15,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster) {
func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string) {
node.Status.Conditions = nodeConditions()
node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort)
node.Status.Addresses = []v1.NodeAddress{
@@ -32,6 +32,10 @@ func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servic
node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
node.Labels["kubernetes.io/os"] = "linux"
// configure versions
node.Status.NodeInfo.KubeletVersion = version
node.Status.NodeInfo.KubeProxyVersion = version
updateNodeCapacityInterval := 10 * time.Second
ticker := time.NewTicker(updateNodeCapacityInterval)

View File

@@ -369,7 +369,7 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
}
// inject networking information to the pod including the virtual cluster controlplane endpoint
p.configureNetworking(pod.Name, pod.Namespace, tPod)
p.configureNetworking(pod.Name, pod.Namespace, tPod, p.serverIP)
p.logger.Infow("Creating pod", "Host Namespace", tPod.Namespace, "Host Name", tPod.Name,
"Virtual Namespace", pod.Namespace, "Virtual Name", "env", pod.Name, pod.Spec.Containers[0].Env)
@@ -475,6 +475,7 @@ func (p *Provider) syncConfigmap(ctx context.Context, podNamespace string, confi
// syncSecret will add the secret object to the queue of the syncer controller to be synced to the host cluster
func (p *Provider) syncSecret(ctx context.Context, podNamespace string, secretName string, optional bool) error {
p.logger.Infow("Syncing secret", "Name", secretName, "Namespace", podNamespace, "optional", optional)
var secret corev1.Secret
nsName := types.NamespacedName{
Namespace: podNamespace,
@@ -707,7 +708,13 @@ func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
// configureNetworking will inject network information to each pod to connect them to the
// virtual cluster api server, as well as configure DNS information to connect them to the
// synced coredns on the host cluster.
func (p *Provider) configureNetworking(podName, podNamespace string, pod *corev1.Pod) {
func (p *Provider) configureNetworking(podName, podNamespace string, pod *corev1.Pod, serverIP string) {
// inject serverIP to hostalias for the pod
KubernetesHostAlias := corev1.HostAlias{
IP: serverIP,
Hostnames: []string{"kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local"},
}
pod.Spec.HostAliases = append(pod.Spec.HostAliases, KubernetesHostAlias)
// inject networking information to the pod's environment variables
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env,
@@ -733,6 +740,31 @@ func (p *Provider) configureNetworking(podName, podNamespace string, pod *corev1
},
)
}
// handle init containers as well
for i := range pod.Spec.InitContainers {
pod.Spec.InitContainers[i].Env = append(pod.Spec.InitContainers[i].Env,
corev1.EnvVar{
Name: "KUBERNETES_PORT_443_TCP",
Value: "tcp://" + p.serverIP + ":6443",
},
corev1.EnvVar{
Name: "KUBERNETES_PORT",
Value: "tcp://" + p.serverIP + ":6443",
},
corev1.EnvVar{
Name: "KUBERNETES_PORT_443_TCP_ADDR",
Value: p.serverIP,
},
corev1.EnvVar{
Name: "KUBERNETES_SERVICE_HOST",
Value: p.serverIP,
},
corev1.EnvVar{
Name: "KUBERNETES_SERVICE_PORT",
Value: "6443",
},
)
}
// injecting cluster DNS IP to the pods except for coredns pod
if !strings.HasPrefix(podName, "coredns") {
pod.Spec.DNSPolicy = corev1.DNSNone

View File

@@ -23,6 +23,12 @@ const (
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
p.logger.Infow("transforming token", "Pod", pod.Name, "Namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
// skip this process if the kube-api-access is already removed from the pod
// this is needed in case users already add their own custom tokens, as in Rancher imported clusters
if !isKubeAccessVolumeFound(pod) {
return nil
}
virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(pod.Spec.ServiceAccountName, "token")
virtualSecret := virtualSecret(virtualSecretName, pod.Namespace, pod.Spec.ServiceAccountName)
if err := p.VirtualClient.Create(ctx, virtualSecret); err != nil {
@@ -84,12 +90,30 @@ func (p *Provider) translateToken(pod *corev1.Pod, hostSecretName string) {
addKubeAccessVolume(pod, hostSecretName)
}
func isKubeAccessVolumeFound(pod *corev1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
return true
}
}
return false
}
func removeKubeAccessVolume(pod *corev1.Pod) {
for i, volume := range pod.Spec.Volumes {
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...)
}
}
// init containers
for i, container := range pod.Spec.InitContainers {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts[:j], pod.Spec.InitContainers[i].VolumeMounts[j+1:]...)
}
}
}
for i, container := range pod.Spec.Containers {
for j, mountPath := range container.VolumeMounts {
if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) {
@@ -109,6 +133,14 @@ func addKubeAccessVolume(pod *corev1.Pod, hostSecretName string) {
},
},
})
for i := range pod.Spec.InitContainers {
pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts, corev1.VolumeMount{
Name: tokenVolumeName,
MountPath: serviceAccountTokenMountPath,
})
}
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, corev1.VolumeMount{
Name: tokenVolumeName,

69
main.go
View File

@@ -3,17 +3,20 @@ package main
import (
"context"
"errors"
"fmt"
"os"
"github.com/go-logr/zapr"
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/buildinfo"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/clusterset"
"github.com/rancher/k3k/pkg/log"
"github.com/urfave/cli"
"github.com/urfave/cli/v2"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
@@ -22,42 +25,43 @@ import (
"sigs.k8s.io/controller-runtime/pkg/manager"
)
const (
program = "k3k"
version = "dev"
gitCommit = "HEAD"
)
var (
scheme = runtime.NewScheme()
clusterCIDR string
sharedAgentImage string
kubeconfig string
debug bool
logger *log.Logger
flags = []cli.Flag{
cli.StringFlag{
scheme = runtime.NewScheme()
clusterCIDR string
sharedAgentImage string
sharedAgentImagePullPolicy string
kubeconfig string
debug bool
logger *log.Logger
flags = []cli.Flag{
&cli.StringFlag{
Name: "kubeconfig",
EnvVar: "KUBECONFIG",
EnvVars: []string{"KUBECONFIG"},
Usage: "Kubeconfig path",
Destination: &kubeconfig,
},
cli.StringFlag{
&cli.StringFlag{
Name: "cluster-cidr",
EnvVar: "CLUSTER_CIDR",
EnvVars: []string{"CLUSTER_CIDR"},
Usage: "Cluster CIDR to be added to the networkpolicy of the clustersets",
Destination: &clusterCIDR,
},
cli.StringFlag{
&cli.StringFlag{
Name: "shared-agent-image",
EnvVar: "SHARED_AGENT_IMAGE",
EnvVars: []string{"SHARED_AGENT_IMAGE"},
Usage: "K3K Virtual Kubelet image",
Value: "rancher/k3k:k3k-kubelet-dev",
Destination: &sharedAgentImage,
},
cli.BoolFlag{
&cli.StringFlag{
Name: "shared-agent-pull-policy",
EnvVars: []string{"SHARED_AGENT_PULL_POLICY"},
Usage: "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never",
Destination: &sharedAgentImagePullPolicy,
},
&cli.BoolFlag{
Name: "debug",
EnvVar: "DEBUG",
EnvVars: []string{"DEBUG"},
Usage: "Debug level logging",
Destination: &debug,
},
@@ -73,20 +77,24 @@ func main() {
app := cmds.NewApp()
app.Flags = flags
app.Action = run
app.Version = version + " (" + gitCommit + ")"
app.Version = buildinfo.Version
app.Before = func(clx *cli.Context) error {
if err := validate(); err != nil {
return err
}
logger = log.New(debug)
return nil
}
if err := app.Run(os.Args); err != nil {
logger.Fatalw("failed to run k3k controller", zap.Error(err))
}
}
func run(clx *cli.Context) error {
ctx := context.Background()
logger.Info("Starting k3k - Version: " + buildinfo.Version)
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return fmt.Errorf("failed to create config from kubeconfig file: %v", err)
@@ -102,7 +110,7 @@ func run(clx *cli.Context) error {
ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1))))
logger.Info("adding cluster controller")
if err := cluster.Add(ctx, mgr, sharedAgentImage, logger); err != nil {
if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, logger); err != nil {
return fmt.Errorf("failed to add the new cluster controller: %v", err)
}
@@ -129,3 +137,14 @@ func run(clx *cli.Context) error {
return nil
}
// validate checks the CLI flag values before the controller starts.
// An empty pull policy is allowed (Kubernetes applies its default);
// otherwise it must be one of Always, IfNotPresent, or Never.
func validate() error {
	switch sharedAgentImagePullPolicy {
	case "",
		string(v1.PullAlways),
		string(v1.PullIfNotPresent),
		string(v1.PullNever):
		return nil
	default:
		return errors.New("invalid value for shared agent image policy")
	}
}

View File

@@ -3,5 +3,7 @@ set -e
cd $(dirname $0)/..
echo Running tests
go test -cover -tags=test ./...
if [ -z ${SKIP_TESTS} ]; then
echo Running tests
go test -cover -tags=test ./...
fi

View File

@@ -14,24 +14,35 @@ type Cluster struct {
metav1.ObjectMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
// +kubebuilder:default={}
// +optional
Spec ClusterSpec `json:"spec"`
Status ClusterStatus `json:"status,omitempty"`
}
type ClusterSpec struct {
// Version is a string representing the Kubernetes version to be used by the virtual nodes.
//
// +optional
Version string `json:"version"`
// Servers is the number of K3s pods to run in server (controlplane) mode.
//
// +kubebuilder:default=1
// +kubebuilder:validation:XValidation:message="cluster must have at least one server",rule="self >= 1"
// +optional
Servers *int32 `json:"servers"`
// Agents is the number of K3s pods to run in agent (worker) mode.
//
// +kubebuilder:default=0
// +kubebuilder:validation:XValidation:message="invalid value for agents",rule="self >= 0"
// +optional
Agents *int32 `json:"agents"`
// NodeSelector is the node selector that will be applied to all server/agent pods.
// In "shared" mode the node selector will be applied also to the workloads.
//
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
@@ -73,10 +84,12 @@ type ClusterSpec struct {
Addons []Addon `json:"addons,omitempty"`
// Mode is the cluster provisioning mode which can be either "shared" or "virtual". Defaults to "shared"
//
// +kubebuilder:default="shared"
// +kubebuilder:validation:Enum=shared;virtual
// +kubebuilder:validation:XValidation:message="mode is immutable",rule="self == oldSelf"
Mode ClusterMode `json:"mode"`
// +optional
Mode ClusterMode `json:"mode,omitempty"`
// Persistence contains options controlling how the etcd data of the virtual cluster is persisted. By default, no data
// persistence is guaranteed, so restart of a virtual cluster pod may result in data loss without this field.
@@ -151,6 +164,7 @@ type NodePortConfig struct {
}
type ClusterStatus struct {
HostVersion string `json:"hostVersion,omitempty"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`

View File

@@ -0,0 +1,3 @@
// Package buildinfo holds build-time metadata for k3k binaries.
package buildinfo

// Version is the binary's version string. It defaults to "dev" and is
// overridden at link time via -ldflags
// ("-X github.com/rancher/k3k/pkg/buildinfo.Version=<tag>", see scripts/build).
var Version = "dev"

View File

@@ -16,11 +16,11 @@ type Agent interface {
Resources() ([]ctrlruntimeclient.Object, error)
}
func New(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage, token string) Agent {
func New(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage, sharedAgentImagePullPolicy, token string) Agent {
if cluster.Spec.Mode == VirtualNodeMode {
return NewVirtualAgent(cluster, serviceIP, token)
}
return NewSharedAgent(cluster, serviceIP, sharedAgentImage, token)
return NewSharedAgent(cluster, serviceIP, sharedAgentImage, sharedAgentImagePullPolicy, token)
}
func configSecretName(clusterName string) string {

View File

@@ -26,18 +26,20 @@ const (
)
type SharedAgent struct {
cluster *v1alpha1.Cluster
serviceIP string
sharedAgentImage string
token string
cluster *v1alpha1.Cluster
serviceIP string
image string
imagePullPolicy string
token string
}
func NewSharedAgent(cluster *v1alpha1.Cluster, serviceIP, sharedAgentImage, token string) Agent {
func NewSharedAgent(cluster *v1alpha1.Cluster, serviceIP, image, imagePullPolicy, token string) Agent {
return &SharedAgent{
cluster: cluster,
serviceIP: serviceIP,
sharedAgentImage: sharedAgentImage,
token: token,
cluster: cluster,
serviceIP: serviceIP,
image: image,
imagePullPolicy: imagePullPolicy,
token: token,
}
}
@@ -60,13 +62,18 @@ func (s *SharedAgent) Config() ctrlruntimeclient.Object {
}
func sharedAgentData(cluster *v1alpha1.Cluster, token, nodeName, ip string) string {
version := cluster.Spec.Version
if cluster.Spec.Version == "" {
version = cluster.Status.HostVersion
}
return fmt.Sprintf(`clusterName: %s
clusterNamespace: %s
nodeName: %s
agentHostname: %s
serverIP: %s
token: %s`,
cluster.Name, cluster.Namespace, nodeName, nodeName, ip, token)
token: %s
version: %s`,
cluster.Name, cluster.Namespace, nodeName, nodeName, ip, token, version)
}
func (s *SharedAgent) Resources() ([]ctrlruntimeclient.Object, error) {
@@ -86,12 +93,10 @@ func (s *SharedAgent) Resources() ([]ctrlruntimeclient.Object, error) {
}
func (s *SharedAgent) deployment() *apps.Deployment {
selector := &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": s.cluster.Name,
"type": "agent",
"mode": "shared",
},
labels := map[string]string{
"cluster": s.cluster.Name,
"type": "agent",
"mode": "shared",
}
return &apps.Deployment{
@@ -102,34 +107,26 @@ func (s *SharedAgent) deployment() *apps.Deployment {
ObjectMeta: metav1.ObjectMeta{
Name: s.Name(),
Namespace: s.cluster.Namespace,
Labels: selector.MatchLabels,
Labels: labels,
},
Spec: apps.DeploymentSpec{
Selector: selector,
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: selector.MatchLabels,
Labels: labels,
},
Spec: s.podSpec(selector),
Spec: s.podSpec(),
},
},
}
}
func (s *SharedAgent) podSpec(affinitySelector *metav1.LabelSelector) v1.PodSpec {
func (s *SharedAgent) podSpec() v1.PodSpec {
var limit v1.ResourceList
return v1.PodSpec{
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: affinitySelector,
TopologyKey: "kubernetes.io/hostname",
},
},
},
},
ServiceAccountName: s.Name(),
Volumes: []v1.Volume{
{
@@ -172,8 +169,8 @@ func (s *SharedAgent) podSpec(affinitySelector *metav1.LabelSelector) v1.PodSpec
Containers: []v1.Container{
{
Name: s.Name(),
Image: s.sharedAgentImage,
ImagePullPolicy: v1.PullAlways,
Image: s.image,
ImagePullPolicy: v1.PullPolicy(s.imagePullPolicy),
Resources: v1.ResourceRequirements{
Limits: limit,
},

View File

@@ -97,16 +97,6 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
var limit v1.ResourceList
args = append([]string{"agent", "--config", "/opt/rancher/k3s/config.yaml"}, args...)
podSpec := v1.PodSpec{
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: affinitySelector,
TopologyKey: "kubernetes.io/hostname",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "config",
@@ -161,9 +151,8 @@ func (v *VirtualAgent) podSpec(image, name string, args []string, affinitySelect
},
Containers: []v1.Container{
{
Name: name,
Image: image,
ImagePullPolicy: v1.PullAlways,
Name: name,
Image: image,
SecurityContext: &v1.SecurityContext{
Privileged: ptr.To(true),
},

View File

@@ -20,6 +20,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
ctrl "sigs.k8s.io/controller-runtime"
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
ctrlruntimecontroller "sigs.k8s.io/controller-runtime/pkg/controller"
@@ -44,21 +45,32 @@ const (
)
type ClusterReconciler struct {
Client ctrlruntimeclient.Client
Scheme *runtime.Scheme
SharedAgentImage string
logger *log.Logger
DiscoveryClient *discovery.DiscoveryClient
Client ctrlruntimeclient.Client
Scheme *runtime.Scheme
SharedAgentImage string
SharedAgentImagePullPolicy string
logger *log.Logger
}
// Add adds a new controller to the manager
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage string, logger *log.Logger) error {
func Add(ctx context.Context, mgr manager.Manager, sharedAgentImage, sharedAgentImagePullPolicy string, logger *log.Logger) error {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
return err
}
// initialize a new Reconciler
reconciler := ClusterReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
SharedAgentImage: sharedAgentImage,
logger: logger.Named(clusterController),
DiscoveryClient: discoveryClient,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
SharedAgentImage: sharedAgentImage,
SharedAgentImagePullPolicy: sharedAgentImagePullPolicy,
logger: logger.Named(clusterController),
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.Cluster{}).
WithOptions(ctrlruntimecontroller.Options{
@@ -76,6 +88,22 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
}
// if the Version is not specified we will try to use the same Kubernetes version of the host.
// This version is stored in the Status object, and it will not be updated if already set.
if cluster.Spec.Version == "" && cluster.Status.HostVersion == "" {
hostVersion, err := c.DiscoveryClient.ServerVersion()
if err != nil {
return reconcile.Result{}, err
}
// update Status HostVersion
cluster.Status.HostVersion = fmt.Sprintf("v%s.%s.0-k3s1", hostVersion.Major, hostVersion.Minor)
if err := c.Client.Status().Update(ctx, &cluster); err != nil {
return reconcile.Result{}, err
}
}
if cluster.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
controllerutil.AddFinalizer(&cluster, clusterFinalizerName)
@@ -338,7 +366,7 @@ func (c *ClusterReconciler) unbindNodeProxyClusterRole(ctx context.Context, clus
}
func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster, serviceIP, token string) error {
agent := agent.New(cluster, serviceIP, c.SharedAgentImage, token)
agent := agent.New(cluster, serviceIP, c.SharedAgentImage, c.SharedAgentImagePullPolicy, token)
agentsConfig := agent.Config()
agentResources, err := agent.Resources()
if err != nil {

View File

@@ -0,0 +1,91 @@
package cluster_test
import (
"context"
"path/filepath"
"testing"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/log"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// TestController wires Ginkgo into the standard `go test` runner and
// executes the "Cluster Controller Suite" specs defined in this package.
func TestController(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Cluster Controller Suite")
}
var (
testEnv *envtest.Environment
k8s *kubernetes.Clientset
k8sClient client.Client
ctx context.Context
cancel context.CancelFunc
)
var _ = BeforeSuite(func() {
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")},
ErrorIfCRDPathMissing: true,
}
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
k8s, err = kubernetes.NewForConfig(cfg)
Expect(err).NotTo(HaveOccurred())
scheme := buildScheme()
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
ctx, cancel = context.WithCancel(context.Background())
err = cluster.Add(ctx, mgr, "", "", &log.Logger{SugaredLogger: zap.NewNop().Sugar()})
Expect(err).NotTo(HaveOccurred())
go func() {
defer GinkgoRecover()
err = mgr.Start(ctx)
Expect(err).NotTo(HaveOccurred(), "failed to run manager")
}()
})
var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
// buildScheme assembles the runtime scheme with every API group the test
// suite interacts with: core, apps, networking, and the k3k v1alpha1 types.
func buildScheme() *runtime.Scheme {
	s := runtime.NewScheme()
	installers := []func(*runtime.Scheme) error{
		corev1.AddToScheme,
		appsv1.AddToScheme,
		networkingv1.AddToScheme,
		v1alpha1.AddToScheme,
	}
	for _, install := range installers {
		Expect(install(s)).NotTo(HaveOccurred())
	}
	return s
}

View File

@@ -0,0 +1,68 @@
package cluster_test
import (
"context"
"fmt"
"time"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("Cluster Controller", func() {
Context("creating a Cluster", func() {
var (
namespace string
)
BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
err := k8sClient.Create(context.Background(), createdNS)
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
})
When("created with a default spec", func() {
It("should have been created with some defaults", func() {
cluster := &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
GenerateName: "clusterset-",
Namespace: namespace,
},
}
err := k8sClient.Create(ctx, cluster)
Expect(err).To(Not(HaveOccurred()))
Expect(cluster.Spec.Mode).To(Equal(v1alpha1.SharedClusterMode))
Expect(cluster.Spec.Agents).To(Equal(ptr.To[int32](0)))
Expect(cluster.Spec.Servers).To(Equal(ptr.To[int32](1)))
Expect(cluster.Spec.Version).To(BeEmpty())
serverVersion, err := k8s.DiscoveryClient.ServerVersion()
Expect(err).To(Not(HaveOccurred()))
expectedHostVersion := fmt.Sprintf("v%s.%s.0-k3s1", serverVersion.Major, serverVersion.Minor)
Eventually(func() string {
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
Expect(err).To(Not(HaveOccurred()))
return cluster.Status.HostVersion
}).
WithTimeout(time.Second * 30).
WithPolling(time.Second).
Should(Equal(expectedHostVersion))
})
})
})
})

View File

@@ -16,8 +16,9 @@ const (
nginxBackendProtocolAnnotation = "nginx.ingress.kubernetes.io/backend-protocol"
nginxSSLRedirectAnnotation = "nginx.ingress.kubernetes.io/ssl-redirect"
serverPort = 6443
etcdPort = 2379
servicePort = 443
serverPort = 6443
etcdPort = 2379
)
func (s *Server) Ingress(ctx context.Context, client client.Client) (*networkingv1.Ingress, error) {

View File

@@ -45,7 +45,7 @@ func New(cluster *v1alpha1.Cluster, client client.Client, token, mode string) *S
}
}
func (s *Server) podSpec(image, name string, persistent bool, affinitySelector *metav1.LabelSelector) v1.PodSpec {
func (s *Server) podSpec(image, name string, persistent bool) v1.PodSpec {
var limit v1.ResourceList
if s.cluster.Spec.Limit != nil && s.cluster.Spec.Limit.ServerLimit != nil {
limit = s.cluster.Spec.Limit.ServerLimit
@@ -53,16 +53,6 @@ func (s *Server) podSpec(image, name string, persistent bool, affinitySelector *
podSpec := v1.PodSpec{
NodeSelector: s.cluster.Spec.NodeSelector,
PriorityClassName: s.cluster.Spec.PriorityClass,
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: affinitySelector,
TopologyKey: "kubernetes.io/hostname",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "initconfig",
@@ -347,7 +337,7 @@ func (s *Server) StatefulServer(ctx context.Context) (*apps.StatefulSet, error)
},
}
podSpec := s.podSpec(image, name, persistent, &selector)
podSpec := s.podSpec(image, name, persistent)
podSpec.Volumes = append(podSpec.Volumes, volumes...)
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volumeMounts...)

View File

@@ -5,6 +5,7 @@ import (
"github.com/rancher/k3k/pkg/controller"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func (s *Server) Service(cluster *v1alpha1.Cluster) *v1.Service {
@@ -38,6 +39,12 @@ func (s *Server) Service(cluster *v1alpha1.Cluster) *v1.Service {
Protocol: v1.ProtocolTCP,
Port: serverPort,
},
{
Name: "k3s-service-port",
Protocol: v1.ProtocolTCP,
Port: servicePort,
TargetPort: intstr.FromInt(serverPort),
},
{
Name: "k3s-etcd-port",
Protocol: v1.ProtocolTCP,
@@ -71,6 +78,12 @@ func (s *Server) StatefulServerService() *v1.Service {
Protocol: v1.ProtocolTCP,
Port: serverPort,
},
{
Name: "k3s-service-port",
Protocol: v1.ProtocolTCP,
Port: servicePort,
TargetPort: intstr.FromInt(serverPort),
},
{
Name: "k3s-etcd-port",
Protocol: v1.ProtocolTCP,

View File

@@ -488,8 +488,8 @@ var _ = Describe("ClusterSet Controller", func() {
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Servers: ptr.To(int32(1)),
Agents: ptr.To(int32(0)),
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
}
@@ -529,8 +529,8 @@ var _ = Describe("ClusterSet Controller", func() {
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Servers: ptr.To(int32(1)),
Agents: ptr.To(int32(0)),
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
}
@@ -570,8 +570,8 @@ var _ = Describe("ClusterSet Controller", func() {
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Servers: ptr.To(int32(1)),
Agents: ptr.To(int32(0)),
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
NodeSelector: map[string]string{"label-1": "value-1"},
},
}
@@ -645,8 +645,8 @@ var _ = Describe("ClusterSet Controller", func() {
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Servers: ptr.To(int32(1)),
Agents: ptr.To(int32(0)),
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
},
}

View File

@@ -27,9 +27,21 @@ var Backoff = wait.Backoff{
Jitter: 0.1,
}
// K3SImage returns the rancher/k3s image reference for the cluster.
// Preference order: the explicit Spec.Version tag, then the host-derived
// Status.HostVersion tag, and finally the untagged image name as a last
// fallback.
//
// NOTE(review): this span carried a stray pre-change `return` as its first
// statement (merged diff), which made the fallback logic unreachable and
// returned an empty tag when Spec.Version was unset; removed.
func K3SImage(cluster *v1alpha1.Cluster) string {
	if cluster.Spec.Version != "" {
		return k3SImageName + ":" + cluster.Spec.Version
	}
	if cluster.Status.HostVersion != "" {
		return k3SImageName + ":" + cluster.Status.HostVersion
	}
	return k3SImageName
}
func nodeAddress(node *v1.Node) string {
var externalIP string
var internalIP string

View File

@@ -2,17 +2,23 @@
set -e pipefail
TAG=$(git describe --tag --always)
TAG=$(git describe --tag --always --match="v[0-9]*")
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
TAG="${TAG}-dirty"
fi
LDFLAGS="-X \"github.com/rancher/k3k/pkg/buildinfo.Version=${TAG}\""
echo "Building k3k..."
echo "Current TAG: ${TAG}"
export CGO_ENABLED=0
GOOS=linux GOARCH=amd64 go build -o bin/k3k
GOOS=linux GOARCH=amd64 go build -o bin/k3k-kubelet ./k3k-kubelet
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k
GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k-kubelet ./k3k-kubelet
# build the cli for the local OS and ARCH
go build -o bin/k3kcli ./cli
go build -ldflags="${LDFLAGS}" -o bin/k3kcli ./cli
docker build -f package/Dockerfile -t rancher/k3k:dev -t rancher/k3k:${TAG} .
docker build -f package/Dockerfile.kubelet -t rancher/k3k-kubelet:dev -t rancher/k3k-kubelet:${TAG} .

88
tests/cluster_test.go Normal file
View File

@@ -0,0 +1,88 @@
package k3k_test
import (
"context"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
)
var _ = When("a cluster is installed", func() {
var namespace string
BeforeEach(func() {
createdNS := &corev1.Namespace{ObjectMeta: v1.ObjectMeta{GenerateName: "ns-"}}
createdNS, err := k8s.CoreV1().Namespaces().Create(context.Background(), createdNS, v1.CreateOptions{})
Expect(err).To(Not(HaveOccurred()))
namespace = createdNS.Name
})
It("will be created in shared mode", func() {
cluster := v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "mycluster",
Namespace: namespace,
},
Spec: v1alpha1.ClusterSpec{
Mode: v1alpha1.SharedClusterMode,
Servers: ptr.To[int32](1),
Agents: ptr.To[int32](0),
Version: "v1.26.1-k3s1",
},
}
err := k8sClient.Create(context.Background(), &cluster)
Expect(err).To(Not(HaveOccurred()))
By("checking server and kubelet readiness state")
// check that the server Pod and the Kubelet are in Ready state
Eventually(func() bool {
podList, err := k8s.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{})
Expect(err).To(Not(HaveOccurred()))
serverRunning := false
kubeletRunning := false
for _, pod := range podList.Items {
imageName := pod.Spec.Containers[0].Image
imageName = strings.Split(imageName, ":")[0] // remove tag
switch imageName {
case "rancher/k3s":
serverRunning = pod.Status.Phase == corev1.PodRunning
case "rancher/k3k-kubelet":
kubeletRunning = pod.Status.Phase == corev1.PodRunning
}
if serverRunning && kubeletRunning {
return true
}
}
return false
}).
WithTimeout(time.Minute).
WithPolling(time.Second * 5).
Should(BeTrue())
By("checking the existence of the bootstrap secret")
secretName := fmt.Sprintf("k3k-%s-bootstrap", cluster.Name)
Eventually(func() error {
_, err := k8s.CoreV1().Secrets(namespace).Get(context.Background(), secretName, v1.GetOptions{})
return err
}).
WithTimeout(time.Minute * 2).
WithPolling(time.Second * 5).
Should(BeNil())
})
})

View File

@@ -10,16 +10,22 @@ import (
"testing"
"time"
"github.com/go-logr/zapr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/k3s"
"go.uber.org/zap"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart/loader"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func TestTests(t *testing.T) {
@@ -30,6 +36,7 @@ func TestTests(t *testing.T) {
var (
k3sContainer *k3s.K3sContainer
k8s *kubernetes.Clientset
k8sClient client.Client
)
var _ = BeforeSuite(func() {
@@ -52,6 +59,14 @@ func initKubernetesClient(kubeconfig []byte) {
k8s, err = kubernetes.NewForConfig(restcfg)
Expect(err).To(Not(HaveOccurred()))
scheme := buildScheme()
k8sClient, err = client.New(restcfg, client.Options{Scheme: scheme})
Expect(err).NotTo(HaveOccurred())
logger, err := zap.NewDevelopment()
Expect(err).NotTo(HaveOccurred())
log.SetLogger(zapr.NewLogger(logger))
}
func installK3kChart(kubeconfig []byte) {
@@ -155,3 +170,10 @@ var _ = When("k3k is installed", func() {
Should(BeTrue())
})
})
// buildScheme returns a runtime scheme that registers the k3k v1alpha1
// types used by the e2e suite.
func buildScheme() *runtime.Scheme {
	s := runtime.NewScheme()
	Expect(v1alpha1.AddToScheme(s)).NotTo(HaveOccurred())
	return s
}