Compare commits

..

4 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| Brian Downs | e426380828 | add back cluster and service cidr (Signed-off-by: Brian Downs <brian.downs@gmail.com>) | 2023-08-18 16:53:56 -07:00 |
| Brian Downs | ecdec030fd | fix remaining conflict (Signed-off-by: Brian Downs <brian.downs@gmail.com>) | 2023-08-17 18:38:02 -07:00 |
| Brian Downs | 5e55b87c02 | updates (Signed-off-by: Brian Downs <brian.downs@gmail.com>) | 2023-08-16 14:01:47 -07:00 |
| Brian Downs | 79c7b8d36d | resolve conflicts and other changes (Signed-off-by: Brian Downs <brian.downs@gmail.com>) | 2023-08-16 13:14:49 -07:00 |
21 changed files with 433 additions and 772 deletions

View File

@@ -8,9 +8,8 @@ platform:
steps:
- name: build
image: rancher/dapper:v0.6.0
image: rancher/dapper:v0.5.0
environment:
CROSS: 'true'
GITHUB_TOKEN:
from_secret: github_token
commands:
@@ -20,12 +19,12 @@ steps:
- name: docker
path: /var/run/docker.sock
when:
ref:
branch:
exclude:
- refs/tags/chart-*
- k3k-chart
- name: package-chart
image: rancher/dapper:v0.6.0
image: rancher/dapper:v0.5.0
environment:
GITHUB_TOKEN:
from_secret: github_token
@@ -35,53 +34,29 @@ steps:
- name: docker
path: /var/run/docker.sock
when:
ref:
- refs/tags/chart-*
branch:
- k3k-chart
instance:
- drone-publish.rancher.io
event:
- tag
- name: release-chart
image: plugins/github-release
settings:
api_key:
from_secret: github_token
checksum:
- sha256
checksum_file: CHECKSUMsum.txt
checksum_flatten: true
files:
- "deploy/*"
when:
instance:
- drone-publish.rancher.io
ref:
include:
- refs/tags/chart-*
event:
- tag
- name: index-chart
image: rancher/dapper:v0.6.0
image: rancher/dapper:v0.5.0
environment:
GITHUB_TOKEN:
from_secret: github_token
commands:
- dapper index-chart
- dapper release-chart
volumes:
- name: docker
path: /var/run/docker.sock
when:
ref:
- refs/tags/chart-*
branch:
- k3k-chart
instance:
- drone-publish.rancher.io
event:
- tag
- name: github_binary_release
image: plugins/github-release
image: ibuildthecloud/github-release:v0.0.1
settings:
api_key:
from_secret: github_token
@@ -96,13 +71,13 @@ steps:
instance:
- drone-publish.rancher.io
ref:
include:
- refs/head/master
- refs/tags/*
exclude:
- refs/tags/chart-*
- refs/head/master
- refs/tags/*
event:
- tag
branch:
exclude:
- k3k-chart
- name: docker-publish
image: plugins/docker
@@ -117,13 +92,13 @@ steps:
instance:
- drone-publish.rancher.io
ref:
include:
- refs/head/master
- refs/tags/*
exclude:
- refs/tags/chart-*
- refs/head/master
- refs/tags/*
event:
- tag
branch:
exclude:
- k3k-chart
volumes:
- name: docker
@@ -153,10 +128,10 @@ steps:
instance:
- drone-publish.rancher.io
ref:
include:
- refs/head/master
- refs/tags/*
branch:
exclude:
- refs/tags/chart-*
- k3k-chart
depends_on:
- amd64
- amd64

View File

@@ -1,4 +1,4 @@
ARG GOLANG=rancher/hardened-build-base:v1.20.7b2
ARG GOLANG=rancher/hardened-build-base:v1.20.6b2
FROM ${GOLANG}
ARG DAPPER_HOST_ARCH

View File

@@ -8,47 +8,11 @@ An example on creating a k3k cluster on an RKE2 host using k3kcli
[![asciicast](https://asciinema.org/a/eYlc3dsL2pfP2B50i3Ea8MJJp.svg)](https://asciinema.org/a/eYlc3dsL2pfP2B50i3Ea8MJJp)
## Architecture
## Usage
K3K consists of a controller and a CLI tool. The controller can be deployed via a Helm chart, and the CLI can be downloaded from the releases page.
### Controller
The K3K controller watches a CRD called `clusters.k3k.io`. Once a cluster object is found, the controller creates a separate namespace and creates a K3S cluster as specified in the spec of the object.
Each server and agent is created as a separate pod that runs in the new namespace.
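For orientation, a minimal controller-runtime setup that watches this CRD could look like the sketch below. It borrows the `v1alpha1.Cluster` type from this repository but is otherwise illustrative rather than the project's actual wiring (the real reconciler lives in `pkg/controller/cluster`, and the `AddToScheme` helper is assumed):

```go
package main

import (
	"context"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type clusterReconciler struct {
	client client.Client
}

// Reconcile is called whenever a clusters.k3k.io object changes.
func (r *clusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	var cluster v1alpha1.Cluster
	if err := r.client.Get(ctx, req.NamespacedName, &cluster); err != nil {
		// The object may already be gone; nothing to do.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	// The real controller creates a namespace plus K3s server/agent pods here.
	return ctrl.Result{}, nil
}

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		panic(err)
	}
	// Assumes the v1alpha1 package exposes the usual AddToScheme helper.
	if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
		panic(err)
	}
	if err := ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.Cluster{}).
		Complete(&clusterReconciler{client: mgr.GetClient()}); err != nil {
		panic(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
```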
### CLI
The CLI provides a quick and easy way to create K3K clusters using simple flags, and it automatically exposes the K3K clusters so they are accessible via a kubeconfig.
## Features
### Isolation
Each cluster runs in a separate namespace that can be isolated via network policies and RBAC rules. Clusters also run in a separate network namespace with flannel as the backend CNI. Finally, each cluster has a separate datastore, which can be persisted.
In addition, k3k offers a persistence feature that helps users persist their datastore using dynamic storage class volumes.
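As a concrete illustration of that namespace-level isolation (not code from this repo), a default-deny NetworkPolicy could be applied to a virtual cluster's namespace with client-go roughly as follows; the namespace name `k3k-mycluster` is hypothetical:

```go
package main

import (
	"context"

	netv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Deny all ingress and egress for every pod in the cluster's namespace.
	policy := &netv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "default-deny", Namespace: "k3k-mycluster"},
		Spec: netv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{}, // empty selector = all pods
			PolicyTypes: []netv1.PolicyType{netv1.PolicyTypeIngress, netv1.PolicyTypeEgress},
		},
	}
	if _, err := cs.NetworkingV1().NetworkPolicies(policy.Namespace).Create(context.TODO(), policy, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```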
### Portability and Customization
The "Cluster" object serves as a template that you can re-use to spin up multiple clusters in a matter of seconds.
K3K clusters use K3S internally and leverage all options that can be passed to K3S. Each cluster is exposed to the host cluster via NodePort, LoadBalancers, and Ingresses.
| | Separate Namespace (for each tenant) | K3K | vcluster | Separate Cluster (for each tenant) |
|-----------------------|---------------------------------------|------------------------------|-----------------|------------------------------------|
| Isolation | Very weak | Very strong | strong | Very strong |
| Access for tenants | Very restricted | Built-in k8s RBAC / Rancher | vcluster admin | Cluster admin |
| Cost | Very cheap | Very cheap | cheap | expensive |
| Overhead | Very low | Very low | Very low | Very high |
| Networking | Shared | Separate | shared | separate |
| Cluster Configuration | | Very easy | Very hard | |
## Usage
### Deploy K3K Controller
### Deploy Controller
[Helm](https://helm.sh) must be installed to use the charts. Please refer to
Helm's [documentation](https://helm.sh/docs) to get started.
@@ -81,40 +45,12 @@ helm delete my-k3k
To create a new cluster, you need to install and run the CLI or create a cluster object. To install the CLI:
#### For Linux and macOS
1 - Download the binary. Linux download URL:
```
wget https://github.com/rancher/k3k/releases/download/v0.0.0-alpha2/k3kcli
```
macOS download URL:
```
wget https://github.com/rancher/k3k/releases/download/v0.0.0-alpha2/k3kcli
```
Then copy it to your local bin directory:
```
```sh
wget https://github.com/rancher/k3k/releases/download/v0.0.0-alpha6/k3kcli
chmod +x k3kcli
sudo cp k3kcli /usr/local/bin
```
#### For Windows
1 - Download the Binary:
Use PowerShell's Invoke-WebRequest cmdlet to download the binary:
```powershell
Invoke-WebRequest -Uri "https://github.com/rancher/k3k/releases/download/v0.0.0-alpha2/k3kcli-windows" -OutFile "k3kcli.exe"
```
2 - Copy the Binary to a Directory in PATH:
To allow running the binary from any command prompt, you can copy it to a directory in your system's PATH. For example, copying it to C:\Users\<YourUsername>\bin (create this directory if it doesn't exist):
```
Copy-Item "k3kcli.exe" "C:\bin"
```
3 - Update Environment Variable (PATH):
If you haven't already added `C:\bin` (or your chosen directory) to your PATH, you can do it through PowerShell:
```
setx PATH "C:\bin;%PATH%"
```
To create a new cluster you can use:
```sh

View File

@@ -2,5 +2,5 @@ apiVersion: v2
name: k3k
description: A Helm chart for K3K
type: application
version: 0.1.1-r1
appVersion: 0.1.0
version: 0.1.0-r1
appVersion: 0.0.0-alpha6

View File

@@ -95,20 +95,6 @@ spec:
type: string
clusterDNS:
type: string
tlsSANs:
type: array
items:
type: string
persistence:
type: object
properties:
type:
type: string
default: "ephermal"
storageClassName:
type: string
storageRequestSize:
type: string
scope: Cluster
names:
plural: clusters

View File

@@ -2,10 +2,10 @@ replicaCount: 1
namespace: k3k-system
image:
repository: rancher/k3k
repository: briandowns/k3k
pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
tag: "v0.1.0-amd64"
tag: "dev"
imagePullSecrets: []
nameOverride: ""

View File

@@ -13,7 +13,6 @@ import (
"github.com/rancher/k3k/cli/cmds"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
@@ -35,7 +34,7 @@ var (
Scheme = runtime.NewScheme()
backoff = wait.Backoff{
Steps: 5,
Duration: 20 * time.Second,
Duration: 3 * time.Second,
Factor: 2,
Jitter: 0.1,
}
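One side of this hunk uses a 20-second base delay, the other 3 seconds; with `Steps: 5` and `Factor: 2` that is roughly 45 seconds versus about five minutes of total waiting before `retry.OnError` gives up. A small, self-contained sketch of how these parameters behave follows; the simulated not-found error stands in for the CLI's `extractKubeconfig` call:

```go
package main

import (
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/retry"
)

func main() {
	// Same shape as the CLI's backoff: 5 attempts, doubling delay, 10% jitter.
	backoff := wait.Backoff{Steps: 5, Duration: 3 * time.Second, Factor: 2, Jitter: 0.1}

	attempt := 0
	start := time.Now()
	err := retry.OnError(backoff, apierrors.IsNotFound, func() error {
		attempt++
		fmt.Printf("attempt %d after %s\n", attempt, time.Since(start).Round(time.Second))
		// Pretend the kubeconfig secret does not exist yet, so every attempt retries.
		return apierrors.NewNotFound(schema.GroupResource{Resource: "secrets"}, "kubeconfig")
	})
	fmt.Println("gave up with:", err)
}
```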
@@ -94,7 +93,7 @@ var (
cli.StringFlag{
Name: "persistence-type",
Usage: "Persistence mode for the nodes (ephermal, static, dynamic)",
Value: server.EphermalNodesType,
Value: cluster.EphermalNodesType,
Destination: &persistenceType,
},
cli.StringFlag{
@@ -178,7 +177,6 @@ func createCluster(clx *cli.Context) error {
if err := retry.OnError(backoff, apierrors.IsNotFound, func() error {
kubeconfig, err = extractKubeconfig(ctx, ctrlClient, cluster, host[0])
if err != nil {
logrus.Infof("waiting for cluster to be available: %v", err)
return err
}
return nil
@@ -201,8 +199,8 @@ func createCluster(clx *cli.Context) error {
}
func validateCreateFlags(clx *cli.Context) error {
if persistenceType != server.EphermalNodesType &&
persistenceType != server.DynamicNodesType {
if persistenceType != cluster.EphermalNodesType &&
persistenceType != cluster.DynamicNodesType {
return errors.New("invalid persistence type")
}
if token == "" {
@@ -211,9 +209,6 @@ func validateCreateFlags(clx *cli.Context) error {
if name == "" {
return errors.New("empty cluster name")
}
if name == cluster.ClusterInvalidName {
return errors.New("invalid cluster name")
}
if servers <= 0 {
return errors.New("invalid number of servers")
}

go.mod (17 changed lines)
View File

@@ -1,17 +1,10 @@
module github.com/rancher/k3k
go 1.20
replace (
go.etcd.io/etcd/api/v3 => github.com/k3s-io/etcd/api/v3 v3.5.9-k3s1
go.etcd.io/etcd/client/v3 => github.com/k3s-io/etcd/client/v3 v3.5.9-k3s1
)
go 1.19
require (
github.com/sirupsen/logrus v1.8.1
github.com/urfave/cli v1.22.12
go.etcd.io/etcd/api/v3 v3.5.9
go.etcd.io/etcd/client/v3 v3.5.5
k8s.io/api v0.26.1
k8s.io/apimachinery v0.26.1
k8s.io/client-go v0.26.1
@@ -21,8 +14,6 @@ require (
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
@@ -48,18 +39,12 @@ require (
github.com/prometheus/procfs v0.8.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/term v0.3.0 // indirect
golang.org/x/time v0.3.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.26.0 // indirect

go.sum (45 changed lines)
View File

@@ -39,8 +39,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -54,15 +52,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -75,8 +64,6 @@ github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
@@ -84,7 +71,6 @@ github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJ
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -110,7 +96,6 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -157,7 +142,6 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -177,7 +161,6 @@ github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -196,10 +179,6 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/k3s-io/etcd/api/v3 v3.5.9-k3s1 h1:y4ont0HdnS7gtWNTXM8gahpKjAHtctgON/sjVRthlZY=
github.com/k3s-io/etcd/api/v3 v3.5.9-k3s1/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
github.com/k3s-io/etcd/client/v3 v3.5.9-k3s1 h1:Knr/8l7Sx92zUyevYO0gIO5P6EEc6ztvRO5EzSnMy+A=
github.com/k3s-io/etcd/client/v3 v3.5.9-k3s1/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -267,7 +246,6 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rancher/dynamiclistener v0.3.5 h1:5TaIHvkDGmZKvc96Huur16zfTKOiLhDtK4S+WV0JA6A=
github.com/rancher/dynamiclistener v0.3.5/go.mod h1:dW/YF6/m2+uEyJ5VtEcd9THxda599HP6N9dSXk81+k0=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -288,7 +266,6 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
@@ -299,21 +276,15 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -379,7 +350,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
@@ -437,11 +407,8 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -458,7 +425,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
@@ -563,7 +529,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
@@ -571,8 +536,6 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -585,11 +548,6 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -602,8 +560,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
@@ -617,7 +573,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@@ -15,21 +15,14 @@ LINKFLAGS="-X github.com/rancher/k3k.Version=$VERSION"
LINKFLAGS="-X github.com/rancher/k3k.GitCommit=$COMMIT $LINKFLAGS"
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-s390x
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3k-arm64
GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-freebsd
GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin-amd64
GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin
GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3k-windows
GOOS=darwin go build -ldflags "$LINKFLAGS" -o bin/k3k-darwin
GOOS=windows go build -ldflags "$LINKFLAGS" -o bin/k3k-windows
fi
# build k3kcli
CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli ./cli
if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli-s390x ./cli
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/k3kcli-arm64 ./cli
GOOS=freebsd GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3k-freebsd ./cli
GOOS=darwin GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin-adm64 ./cli
GOOS=darwin GOARCH=arm64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin ./cli
GOOS=windows GOARCH=amd64 go build -ldflags "$LINKFLAGS" -o bin/k3kcli-windows ./cli
GOOS=darwin go build -ldflags "$LINKFLAGS" -o bin/k3kcli-darwin ./cli
GOOS=windows go build -ldflags "$LINKFLAGS" -o bin/k3kcli-windows ./cli
fi

View File

@@ -12,20 +12,21 @@ if [ $(git tag -l "$version") ]; then
exit 1
fi
# release the chart with artifacts
cr upload --token ${GITHUB_TOKEN} \
--release-name-template "chart-{{ .Version }}" \
--package-path ./deploy/ \
--git-repo k3k \
--skip-existing \
-o rancher
# update the index.yaml
cr index --token ${GITHUB_TOKEN} \
--release-name-template "chart-{{ .Version }}" \
--package-path ./deploy/ \
--index-path index.yaml \
--git-repo k3k \
-o rancher
-o rancher \
--push
# push to gh-pages
git config --global user.email "hussein.galal.ahmed.11@gmail.com"
git config --global user.name "galal-hussein"
git config --global url.https://${GITHUB_TOKEN}@github.com/.insteadOf https://github.com/
# push index.yaml to gh-pages
git add index.yaml
git commit -m "add chart-${CHART_TAG} to index.yaml"
git push --force --set-upstream origin HEAD:gh-pages

View File

@@ -74,9 +74,7 @@ type NodePortConfig struct {
}
type ClusterStatus struct {
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
TLSSANs []string `json:"tlsSANs,omitempty"`
Persistence *PersistenceConfig `json:"persistence,omitempty"`
ClusterCIDR string `json:"clusterCIDR,omitempty"`
ServiceCIDR string `json:"serviceCIDR,omitempty"`
ClusterDNS string `json:"clusterDNS,omitempty"`
}

View File

@@ -31,7 +31,7 @@ func (in *Cluster) DeepCopyInto(out *Cluster) {
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.TypeMeta = in.TypeMeta
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
out.Status = in.Status
return
}
@@ -145,16 +145,6 @@ func (in *ClusterSpec) DeepCopy() *ClusterSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = *in
if in.TLSSANs != nil {
in, out := &in.TLSSANs, &out.TLSSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Persistence != nil {
in, out := &in.Persistence, &out.Persistence
*out = new(PersistenceConfig)
**out = **in
}
return
}

View File

@@ -90,10 +90,10 @@ func (a *Agent) StatefulAgent(cluster *v1alpha1.Cluster) *apps.StatefulSet {
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Status.Persistence.StorageClassName,
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Status.Persistence.StorageRequestSize),
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
},
@@ -110,11 +110,11 @@ func (a *Agent) StatefulAgent(cluster *v1alpha1.Cluster) *apps.StatefulSet {
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Status.Persistence.StorageRequestSize),
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Status.Persistence.StorageClassName,
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
},
},
},

View File

@@ -2,30 +2,17 @@ package cluster
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strings"
"time"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/cluster/agent"
"github.com/rancher/k3k/pkg/controller/cluster/config"
"github.com/rancher/k3k/pkg/controller/cluster/server"
"github.com/rancher/k3k/pkg/controller/util"
"github.com/sirupsen/logrus"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
clientv3 "go.etcd.io/etcd/client/v3"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -39,15 +26,13 @@ import (
const (
clusterController = "k3k-cluster-controller"
clusterFinalizerName = "cluster.k3k.io/finalizer"
etcdPodFinalizerName = "etcdpod.k3k.io/finalizer"
ClusterInvalidName = "system"
EphermalNodesType = "ephermal"
DynamicNodesType = "dynamic"
maxConcurrentReconciles = 1
defaultClusterCIDR = "10.44.0.0/16"
defaultClusterServiceCIDR = "10.45.0.0/16"
defaultStoragePersistentSize = "1G"
memberRemovalTimeout = time.Minute * 1
defaultClusterCIDR = "10.44.0.0/16"
defaultClusterServiceCIDR = "10.45.0.0/16"
)
type ClusterReconciler struct {
@@ -73,51 +58,11 @@ func Add(ctx context.Context, mgr manager.Manager) error {
return err
}
if err := controller.Watch(&source.Kind{Type: &v1alpha1.Cluster{}}, &handler.EnqueueRequestForObject{}); err != nil {
return err
}
return controller.Watch(&source.Kind{Type: &v1.Pod{}},
&handler.EnqueueRequestForOwner{IsController: true, OwnerType: &apps.StatefulSet{}})
return controller.Watch(&source.Kind{Type: &v1alpha1.Cluster{}}, &handler.EnqueueRequestForObject{})
}
func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
var (
cluster v1alpha1.Cluster
podList v1.PodList
clusterName string
)
if req.Namespace != "" {
s := strings.Split(req.Namespace, "-")
if len(s) <= 1 {
return reconcile.Result{}, util.LogAndReturnErr("failed to get cluster namespace", nil)
}
clusterName = s[1]
var cluster v1alpha1.Cluster
if err := c.Client.Get(ctx, types.NamespacedName{Name: clusterName}, &cluster); err != nil {
return reconcile.Result{}, util.LogAndReturnErr("failed to get cluster object", err)
}
if *cluster.Spec.Servers == 1 {
klog.Infof("skipping request for etcd pod for cluster [%s] since it is not in HA mode", clusterName)
return reconcile.Result{}, nil
}
matchingLabels := client.MatchingLabels(map[string]string{"role": "server"})
listOpts := &client.ListOptions{Namespace: req.Namespace}
matchingLabels.ApplyToList(listOpts)
if err := c.Client.List(ctx, &podList, listOpts); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
for _, pod := range podList.Items {
klog.Infof("Handle etcd server pod [%s/%s]", pod.Namespace, pod.Name)
if err := c.handleServerPod(ctx, cluster, &pod); err != nil {
return reconcile.Result{}, util.LogAndReturnErr("failed to handle etcd pod", err)
}
}
return reconcile.Result{}, nil
}
var cluster v1alpha1.Cluster
if err := c.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
@@ -127,7 +72,7 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
if !controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
controllerutil.AddFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, util.LogAndReturnErr("failed to add cluster finalizer", err)
return reconcile.Result{}, err
}
}
@@ -143,34 +88,15 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
klog.Infof("enqueue cluster [%s]", cluster.Name)
if err := c.createCluster(ctx, &cluster); err != nil {
return reconcile.Result{}, util.LogAndReturnErr("failed to create cluster", err)
}
return reconcile.Result{}, nil
}
// remove finalizer from the server pods and update them.
matchingLabels := client.MatchingLabels(map[string]string{"role": "server"})
listOpts := &client.ListOptions{Namespace: util.ClusterNamespace(&cluster)}
matchingLabels.ApplyToList(listOpts)
if err := c.Client.List(ctx, &podList, listOpts); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
for _, pod := range podList.Items {
if controllerutil.ContainsFinalizer(&pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName)
if err := c.Client.Update(ctx, &pod); err != nil {
return reconcile.Result{}, util.LogAndReturnErr("failed to remove etcd finalizer", err)
}
}
return reconcile.Result{}, c.createCluster(ctx, &cluster)
}
if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) {
// remove finalizer from the cluster and update it.
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName)
if err := c.Client.Update(ctx, &cluster); err != nil {
return reconcile.Result{}, util.LogAndReturnErr("failed to remove cluster finalizer", err)
return reconcile.Result{}, err
}
}
klog.Infof("deleting cluster [%s]", cluster.Name)
@@ -179,17 +105,13 @@ func (c *ClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request
}
func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1.Cluster) error {
if cluster.Name == ClusterInvalidName {
klog.Errorf("Invalid cluster name %s, no action will be taken", cluster.Name)
return nil
}
s := server.New(cluster, c.Client)
server := server.New(cluster, c.Client)
agent := agent.New(cluster)
if cluster.Spec.Persistence != nil {
cluster.Status.Persistence = cluster.Spec.Persistence
if cluster.Spec.Persistence.StorageRequestSize == "" {
// default to 1G of request size
cluster.Status.Persistence.StorageRequestSize = defaultStoragePersistentSize
if cluster.Spec.Persistence == nil {
// default to ephermal nodes
cluster.Spec.Persistence = &v1alpha1.PersistenceConfig{
Type: EphermalNodesType,
}
}
if err := c.Client.Update(ctx, cluster); err != nil {
@@ -211,7 +133,7 @@ func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1
}
klog.Infof("creating cluster service")
serviceIP, err := c.createClusterService(ctx, cluster, s)
serviceIP, err := c.createClusterService(ctx, cluster, server)
if err != nil {
return util.LogAndReturnErr("failed to create cluster service", err)
}
@@ -221,17 +143,23 @@ func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1
}
// creating statefulsets in case the user chose a persistence type other than ephermal
if err := c.server(ctx, cluster, s); err != nil {
return util.LogAndReturnErr("failed to create servers", err)
}
if err := c.agent(ctx, cluster); err != nil {
return util.LogAndReturnErr("failed to create agents", err)
if cluster.Spec.Persistence.Type != EphermalNodesType {
if cluster.Spec.Persistence.StorageRequestSize == "" {
// default to 1G of request size
cluster.Spec.Persistence.StorageRequestSize = "1G"
}
if err := c.createStatefulSets(ctx, cluster, server, agent); err != nil {
return util.LogAndReturnErr("failed to create servers and agents statefulsets", err)
}
} else {
if err := c.createDeployments(ctx, cluster, server); err != nil {
return util.LogAndReturnErr("failed to create servers and agents deployment", err)
}
}
if cluster.Spec.Expose != nil {
if cluster.Spec.Expose.Ingress != nil {
serverIngress, err := s.Ingress(ctx, c.Client)
serverIngress, err := server.Ingress(ctx, c.Client)
if err != nil {
return util.LogAndReturnErr("failed to create ingress object", err)
}
@@ -244,7 +172,7 @@ func (c *ClusterReconciler) createCluster(ctx context.Context, cluster *v1alpha1
}
}
kubeconfigSecret, err := s.GenerateNewKubeConfig(ctx, serviceIP)
kubeconfigSecret, err := server.GenerateNewKubeConfig(ctx, serviceIP)
if err != nil {
return util.LogAndReturnErr("failed to generate new kubeconfig", err)
}
@@ -349,36 +277,40 @@ func (c *ClusterReconciler) createClusterService(ctx context.Context, cluster *v
return service.Spec.ClusterIP, nil
}
func (c *ClusterReconciler) server(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) error {
// create headless service for the statefulset
serverStatefulService := server.StatefulServerService(cluster)
if err := controllerutil.SetControllerReference(cluster, serverStatefulService, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serverStatefulService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
ServerStatefulSet, err := server.StatefulServer(ctx, cluster)
func (c *ClusterReconciler) createDeployments(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server) error {
// create deployment for the init server
// the init deployment must have only 1 replica
initServerDeployment, err := server.Deploy(ctx, true)
if err != nil {
return err
}
if err := controllerutil.SetControllerReference(cluster, ServerStatefulSet, c.Scheme); err != nil {
if err := controllerutil.SetControllerReference(cluster, initServerDeployment, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, ServerStatefulSet); err != nil {
if err := c.Client.Create(ctx, initServerDeployment); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
return nil
}
// create deployment for the rest of the servers
serversDeployment, err := server.Deploy(ctx, false)
if err != nil {
return err
}
if err := controllerutil.SetControllerReference(cluster, serversDeployment, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serversDeployment); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster) error {
agent := agent.New(cluster)
agentsDeployment := agent.Deploy()
@@ -391,6 +323,80 @@ func (c *ClusterReconciler) agent(ctx context.Context, cluster *v1alpha1.Cluster
return err
}
}
return nil
}
func (c *ClusterReconciler) createStatefulSets(ctx context.Context, cluster *v1alpha1.Cluster, server *server.Server, agent *agent.Agent) error {
// create headless service for the init statefulset
initServerStatefulService := server.StatefulServerService(cluster, true)
if err := controllerutil.SetControllerReference(cluster, initServerStatefulService, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, initServerStatefulService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// create statefulsets for the init server
// the init statefulset must have only 1 replica
initServerStatefulSet := server.StatefulServer(ctx, cluster, true)
if err := controllerutil.SetControllerReference(cluster, initServerStatefulSet, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, initServerStatefulSet); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// create statefulset for the rest of the servers
// create headless service for the server statefulset
serverStatefulService := server.StatefulServerService(cluster, false)
if err := controllerutil.SetControllerReference(cluster, serverStatefulService, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serverStatefulService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
serversStatefulSet := server.StatefulServer(ctx, cluster, false)
if err := controllerutil.SetControllerReference(cluster, serversStatefulSet, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, serversStatefulSet); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
// create headless service for the agents statefulset
agentStatefulService := agent.StatefulAgentService(cluster)
if err := controllerutil.SetControllerReference(cluster, agentStatefulService, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, agentStatefulService); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
agentsStatefulSet := agent.StatefulAgent(cluster)
if err := controllerutil.SetControllerReference(cluster, agentsStatefulSet, c.Scheme); err != nil {
return err
}
if err := c.Client.Create(ctx, agentsStatefulSet); err != nil {
if !apierrors.IsAlreadyExists(err) {
return err
}
}
return nil
}
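Every object in `createDeployments` and `createStatefulSets` goes through the same three steps: set the controller owner reference, create the object, and ignore "already exists". That pattern could be factored into a small helper along these lines (a sketch only, not part of this change; the function name and placement are made up):

```go
package cluster // placement is hypothetical

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// createOwned sets owner as the controlling owner of obj and creates it,
// treating an AlreadyExists error as success so reconciles stay idempotent.
func createOwned(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner, obj client.Object) error {
	if err := controllerutil.SetControllerReference(owner, obj, scheme); err != nil {
		return err
	}
	if err := c.Create(ctx, obj); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}
	return nil
}
```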
@@ -451,124 +457,3 @@ func agentData(serviceIP, token string) string {
return fmt.Sprintf(`server: https://%s:6443
token: %s`, serviceIP, token)
}
func (c *ClusterReconciler) handleServerPod(ctx context.Context, cluster v1alpha1.Cluster, pod *v1.Pod) error {
if _, ok := pod.Labels["role"]; ok {
if pod.Labels["role"] != "server" {
return nil
}
} else {
return errors.New("server pod has no role label")
}
// if etcd pod is marked for deletion then we need to remove it from the etcd member list before deletion
if !pod.DeletionTimestamp.IsZero() {
if cluster.Status.Persistence.Type != server.EphermalNodesType {
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
if err := c.Client.Update(ctx, pod); err != nil {
return err
}
}
}
tlsConfig, err := c.getETCDTLS(&cluster)
if err != nil {
return err
}
// remove server from etcd
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{
"https://k3k-server-service." + pod.Namespace + ":2379",
},
TLS: tlsConfig,
})
if err != nil {
return err
}
if err := removePeer(ctx, client, pod.Name, pod.Status.PodIP); err != nil {
return err
}
// remove our finalizer from the list and update it.
if controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.RemoveFinalizer(pod, etcdPodFinalizerName)
if err := c.Client.Update(ctx, pod); err != nil {
return err
}
}
}
if !controllerutil.ContainsFinalizer(pod, etcdPodFinalizerName) {
controllerutil.AddFinalizer(pod, etcdPodFinalizerName)
return c.Client.Update(ctx, pod)
}
return nil
}
// removePeer removes a peer from the cluster. The peer name and IP address must both match.
func removePeer(ctx context.Context, client *clientv3.Client, name, address string) error {
ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
defer cancel()
members, err := client.MemberList(ctx)
if err != nil {
return err
}
for _, member := range members.Members {
if !strings.Contains(member.Name, name) {
continue
}
for _, peerURL := range member.PeerURLs {
u, err := url.Parse(peerURL)
if err != nil {
return err
}
if u.Hostname() == address {
logrus.Infof("Removing name=%s id=%d address=%s from etcd", member.Name, member.ID, address)
_, err := client.MemberRemove(ctx, member.ID)
if errors.Is(err, rpctypes.ErrGRPCMemberNotFound) {
return nil
}
return err
}
}
}
return nil
}
func (c *ClusterReconciler) getETCDTLS(cluster *v1alpha1.Cluster) (*tls.Config, error) {
klog.Infof("generating etcd TLS client certificate for cluster [%s]", cluster.Name)
token := cluster.Spec.Token
endpoint := "k3k-server-service." + util.ClusterNamespace(cluster)
var bootstrap *server.ControlRuntimeBootstrap
if err := retry.OnError(retry.DefaultBackoff, func(err error) bool {
return true
}, func() error {
var err error
bootstrap, err = server.DecodedBootstrap(token, endpoint)
return err
}); err != nil {
return nil, err
}
etcdCert, etcdKey, err := server.CreateClientCertKey("etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, bootstrap.ETCDServerCA.Content, bootstrap.ETCDServerCAKey.Content)
if err != nil {
return nil, err
}
clientCert, err := tls.X509KeyPair(etcdCert, etcdKey)
if err != nil {
return nil, err
}
// create rootCA CertPool
cert, err := certutil.ParseCertsPEM([]byte(bootstrap.ETCDServerCA.Content))
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
pool.AddCert(cert[0])
return &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{clientCert},
}, nil
}

View File

@@ -13,12 +13,6 @@ func Server(cluster *v1alpha1.Cluster, init bool, serviceIP string) (*v1.Secret,
name = "k3k-init-server-config"
}
cluster.Status.TLSSANs = append(cluster.Spec.TLSSANs,
serviceIP,
"k3k-server-service",
"k3k-server-service."+util.ClusterNamespace(cluster),
)
config := serverConfigData(serviceIP, cluster)
if init {
config = initConfigData(cluster)
@@ -62,9 +56,9 @@ func serverOptions(cluster *v1alpha1.Cluster) string {
if cluster.Spec.ClusterDNS != "" {
opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n"
}
if len(cluster.Status.TLSSANs) > 0 {
if len(cluster.Spec.TLSSANs) > 0 {
opts = opts + "tls-san:\n"
for _, addr := range cluster.Status.TLSSANs {
for _, addr := range cluster.Spec.TLSSANs {
opts = opts + "- " + addr + "\n"
}
}

View File

@@ -22,6 +22,7 @@ func (s *Server) Ingress(ctx context.Context, client client.Client) (*networking
if err != nil {
return nil, err
}
ingressRules := s.ingressRules(addresses)
ingress := &networkingv1.Ingress{
TypeMeta: metav1.TypeMeta{
@@ -46,6 +47,7 @@ func (s *Server) Ingress(ctx context.Context, client client.Client) (*networking
func (s *Server) ingressRules(addresses []string) []networkingv1.IngressRule {
var ingressRules []networkingv1.IngressRule
pathTypePrefix := networkingv1.PathTypePrefix
for _, address := range addresses {
rule := networkingv1.IngressRule{
Host: s.cluster.Name + "." + address + wildcardDNS,

View File

@@ -26,13 +26,11 @@ const (
port = 6443
)
type ControlRuntimeBootstrap struct {
ServerCA content
ServerCAKey content
ClientCA content
ClientCAKey content
ETCDServerCA content
ETCDServerCAKey content
type controlRuntimeBootstrap struct {
ServerCA content
ServerCAKey content
ClientCA content
ClientCAKey content
}
type content struct {
@@ -48,7 +46,7 @@ type content struct {
func (s *Server) GenerateNewKubeConfig(ctx context.Context, ip string) (*v1.Secret, error) {
token := s.cluster.Spec.Token
var bootstrap *ControlRuntimeBootstrap
var bootstrap *controlRuntimeBootstrap
if err := retry.OnError(retry.DefaultBackoff, func(err error) bool {
return true
}, func() error {
@@ -63,7 +61,7 @@ func (s *Server) GenerateNewKubeConfig(ctx context.Context, ip string) (*v1.Secr
return nil, err
}
adminCert, adminKey, err := CreateClientCertKey(
adminCert, adminKey, err := createClientCertKey(
adminCommonName, []string{user.SystemPrivilegedGroup},
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
bootstrap.ClientCA.Content,
@@ -94,7 +92,7 @@ func (s *Server) GenerateNewKubeConfig(ctx context.Context, ip string) (*v1.Secr
}
func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error) {
func requestBootstrap(token, serverIP string) (*controlRuntimeBootstrap, error) {
url := "https://" + serverIP + ":6443/v1-k3s/server-bootstrap"
client := http.Client{
@@ -118,7 +116,7 @@ func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error)
}
defer resp.Body.Close()
var runtimeBootstrap ControlRuntimeBootstrap
var runtimeBootstrap controlRuntimeBootstrap
if err := json.NewDecoder(resp.Body).Decode(&runtimeBootstrap); err != nil {
return nil, err
}
@@ -126,7 +124,7 @@ func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error)
return &runtimeBootstrap, nil
}
func CreateClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCert, caKey string) ([]byte, []byte, error) {
func createClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCert, caKey string) ([]byte, []byte, error) {
caKeyPEM, err := certutil.ParsePrivateKeyPEM([]byte(caKey))
if err != nil {
return nil, nil, err
@@ -205,7 +203,7 @@ func basicAuth(username, password string) string {
return base64.StdEncoding.EncodeToString([]byte(auth))
}
func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
func decodeBootstrap(bootstrap *controlRuntimeBootstrap) error {
//client-ca
decoded, err := base64.StdEncoding.DecodeString(bootstrap.ClientCA.Content)
if err != nil {
@@ -234,32 +232,5 @@ func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error {
}
bootstrap.ServerCAKey.Content = string(decoded)
//etcd-ca
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ETCDServerCA.Content)
if err != nil {
return err
}
bootstrap.ETCDServerCA.Content = string(decoded)
//etcd-ca-key
decoded, err = base64.StdEncoding.DecodeString(bootstrap.ETCDServerCAKey.Content)
if err != nil {
return err
}
bootstrap.ETCDServerCAKey.Content = string(decoded)
return nil
}
func DecodedBootstrap(token, ip string) (*ControlRuntimeBootstrap, error) {
bootstrap, err := requestBootstrap(token, ip)
if err != nil {
return nil, err
}
if err := decodeBootstrap(bootstrap); err != nil {
return nil, err
}
return bootstrap, nil
}

View File

@@ -2,7 +2,8 @@ package server
import (
"context"
"strings"
"fmt"
"strconv"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
@@ -11,7 +12,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -20,11 +20,6 @@ const (
serverName = "k3k-"
k3kSystemNamespace = serverName + "system"
initServerName = serverName + "init-server"
initContainerName = serverName + "server-check"
initContainerImage = "alpine/curl"
EphermalNodesType = "ephermal"
DynamicNodesType = "dynamic"
)
// Server
@@ -40,220 +35,18 @@ func New(cluster *v1alpha1.Cluster, client client.Client) *Server {
}
}
func (s *Server) podSpec(ctx context.Context, image, name string, persistent bool) v1.PodSpec {
podSpec := v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "initconfig",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: "k3k-init-server-config",
Items: []v1.KeyToPath{
{
Key: "config.yaml",
Path: "config.yaml",
},
},
},
},
},
{
Name: "config",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: "k3k-server-config",
Items: []v1.KeyToPath{
{
Key: "config.yaml",
Path: "config.yaml",
},
},
},
},
},
{
Name: "run",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varrun",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibcni",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlog",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
{
Name: name,
Image: image,
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
},
SecurityContext: &v1.SecurityContext{
Privileged: pointer.Bool(true),
},
Command: []string{
"/bin/sh",
"-c",
`if [ ${POD_NAME: -1} == 0 ]; then
/bin/k3s server --config /opt/rancher/k3s/init/config.yaml ` + strings.Join(s.cluster.Spec.ServerArgs, " ") + `
else /bin/k3s server --config /opt/rancher/k3s/server/config.yaml ` + strings.Join(s.cluster.Spec.ServerArgs, " ") + `
fi
`,
},
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
MountPath: "/opt/rancher/k3s/server",
ReadOnly: false,
},
{
Name: "initconfig",
MountPath: "/opt/rancher/k3s/init",
ReadOnly: false,
},
{
Name: "run",
MountPath: "/run",
ReadOnly: false,
},
{
Name: "varrun",
MountPath: "/var/run",
ReadOnly: false,
},
{
Name: "varlibcni",
MountPath: "/var/lib/cni",
ReadOnly: false,
},
{
Name: "varlibkubelet",
MountPath: "/var/lib/kubelet",
ReadOnly: false,
},
{
Name: "varlibrancherk3s",
MountPath: "/var/lib/rancher/k3s",
ReadOnly: false,
},
{
Name: "varlog",
MountPath: "/var/log",
ReadOnly: false,
},
},
},
},
}
func (s *Server) Deploy(ctx context.Context, init bool) (*apps.Deployment, error) {
var replicas int32
image := util.K3SImage(s.cluster)
if !persistent {
podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}, v1.Volume{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
)
}
// Add a readiness probe so the pod only reports ready once the k3s API server is listening on port 6443.
podSpec.Containers[0].ReadinessProbe = &v1.Probe{
InitialDelaySeconds: 60,
FailureThreshold: 5,
TimeoutSeconds: 10,
ProbeHandler: v1.ProbeHandler{
TCPSocket: &v1.TCPSocketAction{
Port: intstr.FromInt(6443),
Host: "127.0.0.1",
},
},
}
return podSpec
}
func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster) (*apps.StatefulSet, error) {
var (
replicas int32
pvClaims []v1.PersistentVolumeClaim
persistent bool
)
image := util.K3SImage(cluster)
name := serverName + "server"
if init {
name = serverName + "init-server"
}
replicas = *cluster.Spec.Servers
if cluster.Spec.Persistence != nil && cluster.Spec.Persistence.Type != EphermalNodesType {
persistent = true
pvClaims = []v1.PersistentVolumeClaim{
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibrancherk3s",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibkubelet",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
},
},
}
replicas = *s.cluster.Spec.Servers - 1
if init {
replicas = 1
}
var volumes []v1.Volume
@@ -314,13 +107,177 @@ func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster)
volumeMounts = append(volumeMounts, volumeMount)
}
podSpec := s.podSpec(ctx, image, name, persistent)
podSpec := s.podSpec(ctx, image, name, false)
podSpec.Volumes = append(podSpec.Volumes, volumes...)
podSpec.Containers[0].VolumeMounts = append(podSpec.Containers[0].VolumeMounts, volumeMounts...)
fmt.Printf("XXX - Pod Spec\n %#v\n", podSpec)
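// Build a Deployment whose selector matches the cluster name, the server role and the init flag, so the server Service can target these pods.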
return &apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: s.cluster.Name + "-" + name,
Namespace: util.ClusterNamespace(s.cluster),
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster": s.cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"cluster": s.cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
},
Spec: podSpec,
},
},
}, nil
}
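// podSpec builds the k3s server pod spec shared by the Deployment and StatefulSet paths.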
func (s *Server) podSpec(ctx context.Context, image, name string, statefulSet bool) v1.PodSpec {
args := append([]string{"server", "--config", "/opt/rancher/k3s/config.yaml"}, s.cluster.Spec.ServerArgs...)
podSpec := v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "config",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name + "-config",
Items: []v1.KeyToPath{
{
Key: "config.yaml",
Path: "config.yaml",
},
},
},
},
},
{
Name: "run",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varrun",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlibcni",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
{
Name: "varlog",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
{
Name: name,
Image: image,
SecurityContext: &v1.SecurityContext{
Privileged: pointer.Bool(true),
},
Command: []string{
"/bin/k3s",
},
Args: args,
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
MountPath: "/opt/rancher/k3s/",
ReadOnly: false,
},
{
Name: "run",
MountPath: "/run",
ReadOnly: false,
},
{
Name: "varrun",
MountPath: "/var/run",
ReadOnly: false,
},
{
Name: "varlibcni",
MountPath: "/var/lib/cni",
ReadOnly: false,
},
{
Name: "varlibkubelet",
MountPath: "/var/lib/kubelet",
ReadOnly: false,
},
{
Name: "varlibrancherk3s",
MountPath: "/var/lib/rancher/k3s",
ReadOnly: false,
},
{
Name: "varlog",
MountPath: "/var/log",
ReadOnly: false,
},
},
},
},
}
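// Deployments carry no volume claim templates, so back the kubelet and k3s data directories with emptyDir volumes instead.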
if !statefulSet {
podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
Name: "varlibkubelet",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}, v1.Volume{
Name: "varlibrancherk3s",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
)
}
return podSpec
}
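// StatefulServer builds the StatefulSet for the k3s servers: the init server runs a single replica, while the remaining servers (Servers - 1) join it.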
func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster, init bool) *apps.StatefulSet {
var replicas int32
image := util.K3SImage(cluster)
name := serverName
if init {
name = initServerName
}
replicas = *cluster.Spec.Servers - 1
if init {
replicas = 1
}
return &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
@@ -334,18 +291,59 @@ func (s *Server) StatefulServer(ctx context.Context, cluster *v1alpha1.Cluster)
MatchLabels: map[string]string{
"cluster": cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
},
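// Persist the k3s and kubelet data directories through PVCs so server state survives pod restarts.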
VolumeClaimTemplates: []v1.PersistentVolumeClaim{
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibrancherk3s",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "varlibkubelet",
Namespace: util.ClusterNamespace(cluster),
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"storage": resource.MustParse(cluster.Spec.Persistence.StorageRequestSize),
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &cluster.Spec.Persistence.StorageClassName,
},
},
},
VolumeClaimTemplates: pvClaims,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"cluster": cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
},
Spec: podSpec,
Spec: s.podSpec(ctx, image, name, true),
},
},
}, nil
}
}

View File

@@ -1,6 +1,8 @@
package server
import (
"strconv"
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1"
"github.com/rancher/k3k/pkg/controller/util"
v1 "k8s.io/api/core/v1"
@@ -38,18 +40,16 @@ func (s *Server) Service(cluster *v1alpha1.Cluster) *v1.Service {
Protocol: v1.ProtocolTCP,
Port: port,
},
{
Name: "k3s-etcd-port",
Protocol: v1.ProtocolTCP,
Port: 2379,
},
},
},
}
}
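// StatefulServerService exposes the k3s API port (6443) for either the init server or the joining servers, selected via the init label.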
func (s *Server) StatefulServerService(cluster *v1alpha1.Cluster) *v1.Service {
func (s *Server) StatefulServerService(cluster *v1alpha1.Cluster, init bool) *v1.Service {
name := serverName
if init {
name = initServerName
}
return &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
@@ -65,6 +65,7 @@ func (s *Server) StatefulServerService(cluster *v1alpha1.Cluster) *v1.Service {
Selector: map[string]string{
"cluster": cluster.Name,
"role": "server",
"init": strconv.FormatBool(init),
},
Ports: []v1.ServicePort{
{
@@ -72,11 +73,6 @@ func (s *Server) StatefulServerService(cluster *v1alpha1.Cluster) *v1.Service {
Protocol: v1.ProtocolTCP,
Port: 6443,
},
{
Name: "k3s-etcd-port",
Protocol: v1.ProtocolTCP,
Port: 2379,
},
},
},
}

View File

@@ -58,7 +58,8 @@ func Addresses(ctx context.Context, client client.Client) ([]string, error) {
return nil, err
}
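// Collect the address of every node in the host cluster.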
var addresses []string
addresses := make([]string, 0, len(nodeList.Items))
for _, node := range nodeList.Items {
addresses = append(addresses, nodeAddress(&node))
}