Mirror of https://github.com/rancher/k3k.git (synced 2026-03-02 09:40:31 +00:00)

Compare commits: chart-1.0. ... v1.0.2-rc1 (12 commits)
| Author | SHA1 | Date |
|---|---|---|
| | af5d33cfb8 | |
| | f0d9b08b24 | |
| | a871917aec | |
| | c16eae99c7 | |
| | fc6bcedc5f | |
| | 0086d5aa4a | |
| | c9bb1bcf46 | |
| | 6d5dd8564f | |
| | 93025d301b | |
| | e385ceb66f | |
| | 5c49c3d6b7 | |
| | 521ff17ef6 | |
.github/workflows/fossa.yml (new file, +34 lines, vendored)

@@ -0,0 +1,34 @@

name: FOSSA Scanning

on:
  push:
    branches: ["main", "master", "release/**"]
  workflow_dispatch:

permissions:
  contents: read
  id-token: write

jobs:
  fossa-scanning:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6

      # The FOSSA token is shared between all repos in Rancher's GH org. It can be
      # used directly and there is no need to request specific access to EIO.
      - name: Read FOSSA token
        uses: rancher-eio/read-vault-secrets@main
        with:
          secrets: |
            secret/data/github/org/rancher/fossa/push token | FOSSA_API_KEY_PUSH_ONLY

      - name: FOSSA scan
        uses: fossas/fossa-action@main
        with:
          api-key: ${{ env.FOSSA_API_KEY_PUSH_ONLY }}
          # Only runs the scan and does not return any results back to the
          # pipeline.
          run-tests: false
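Because the workflow above also declares a `workflow_dispatch` trigger, it can be started by hand as well as on pushes. A minimal sketch using the GitHub CLI (assuming `gh` is installed and authenticated against the repository):

```bash
# manually trigger the FOSSA scan workflow on the main branch
gh workflow run fossa.yml --ref main

# follow the run that was just started
gh run watch
```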
.github/workflows/test-e2e.yaml (4 lines changed, vendored)

@@ -20,8 +20,12 @@ jobs:
       with:
         go-version-file: go.mod
 
+      - name: Install Pandoc
+        run: sudo apt-get install pandoc
+
       - name: Validate
         run: make validate
 
   tests-e2e:
     runs-on: ubuntu-latest
     needs: validate
.github/workflows/test.yaml (3 lines changed, vendored)

@@ -37,6 +37,9 @@ jobs:
       with:
         go-version-file: go.mod
 
+      - name: Install Pandoc
+        run: sudo apt-get install pandoc
+
       - name: Validate
         run: make validate
Makefile (26 lines changed)

@@ -10,16 +10,17 @@ GINKGO_VERSION ?= v2.21.0
 GINKGO_FLAGS ?= -v -r --coverprofile=cover.out --coverpkg=./...
 ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5
 ENVTEST_K8S_VERSION := 1.31.0
-CRD_REF_DOCS_VER ?= v0.1.0
+CRD_REF_DOCS_VER ?= v0.2.0
 
 GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
 GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION)
 CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER)
+PANDOC := $(shell which pandoc 2> /dev/null)
 
 ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)
 ENVTEST_DIR ?= $(shell pwd)/.envtest
 
-E2E_LABEL_FILTER ?= "e2e"
+E2E_LABEL_FILTER ?= e2e
 
 export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path)
@@ -83,12 +84,27 @@ generate: ## Generate the CRDs specs
 	go generate ./...
 
 .PHONY: docs
-docs: ## Build the CRDs and CLI docs
+docs: docs-crds docs-cli ## Build the CRDs and CLI docs
+
+.PHONY: docs-crds
+docs-crds: ## Build the CRDs docs
 	$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
 		--renderer=markdown \
 		--source-path=./pkg/apis/k3k.io/v1beta1 \
-		--output-path=./docs/crds/crd-docs.md
-	@go run ./docs/cli/genclidoc.go
+		--output-path=./docs/crds/crds.md
+
+	$(CRD_REF_DOCS) --config=./docs/crds/config.yaml \
+		--renderer=asciidoctor \
+		--templates-dir=./docs/crds/templates/asciidoctor \
+		--source-path=./pkg/apis/k3k.io/v1beta1 \
+		--output-path=./docs/crds/crds.adoc
+
+.PHONY: docs-cli
+docs-cli: ## Build the CLI docs
+ifeq (, $(PANDOC))
+	$(error "pandoc not found in PATH.")
+endif
+	@./scripts/generate-cli-docs
 
 .PHONY: lint
 lint: ## Find any linting issues in the project
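With these targets in place, regenerating the documentation locally is a single make invocation; a quick sketch (assuming Go and pandoc are on PATH, since `docs-cli` errors out when `$(PANDOC)` is empty):

```bash
make docs        # runs both docs-crds and docs-cli
make docs-crds   # only regenerate docs/crds/crds.md and docs/crds/crds.adoc
make docs-cli    # only regenerate the CLI docs via scripts/generate-cli-docs
```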
@@ -67,7 +67,7 @@ To install it, simply download the latest available version for your architectur
 For example, you can download the Linux amd64 version with:
 
 ```
-wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.0/k3kcli-linux-amd64 && \
+wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.1/k3kcli-linux-amd64 && \
 chmod +x k3kcli && \
 sudo mv k3kcli /usr/local/bin
 ```
@@ -75,7 +75,7 @@ wget -qO k3kcli https://github.com/rancher/k3k/releases/download/v1.0.0/k3kcli-l
 You should now be able to run:
 
 ```bash
 -> % k3kcli --version
-k3kcli version v1.0.0
+k3kcli version v1.0.1
 ```
@@ -2,5 +2,5 @@ apiVersion: v2
 name: k3k
 description: A Helm chart for K3K
 type: application
-version: 1.0.1-rc1
-appVersion: v1.0.1-rc1
+version: 1.0.1
+appVersion: v1.0.1
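For reference, a chart release bumped this way would typically be rolled out with Helm. An illustrative sketch only: the repository alias `k3k` and the target namespace below are assumptions, not taken from this diff:

```bash
# assumes a Helm repo named "k3k" serving this chart is already configured
helm upgrade --install k3k k3k/k3k \
  --version 1.0.1 \
  --namespace k3k-system --create-namespace
```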
@@ -7,7 +7,7 @@ import (
 func NewClusterCmd(appCtx *AppContext) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "cluster",
-		Short: "cluster command",
+		Short: "K3k cluster command.",
 	}
 
 	cmd.AddCommand(
@@ -58,7 +58,7 @@ func NewClusterCreateCmd(appCtx *AppContext) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:     "create",
-		Short:   "Create new cluster",
+		Short:   "Create a new cluster.",
 		Example: "k3kcli cluster create [command options] NAME",
 		PreRunE: func(cmd *cobra.Command, args []string) error {
 			return validateCreateConfig(createConfig)
@@ -24,7 +24,7 @@ var keepData bool
 func NewClusterDeleteCmd(appCtx *AppContext) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:     "delete",
-		Short:   "Delete an existing cluster",
+		Short:   "Delete an existing cluster.",
 		Example: "k3kcli cluster delete [command options] NAME",
 		RunE:    delete(appCtx),
 		Args:    cobra.ExactArgs(1),
@@ -16,7 +16,7 @@ import (
 func NewClusterListCmd(appCtx *AppContext) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:     "list",
-		Short:   "List all the existing cluster",
+		Short:   "List all existing clusters.",
 		Example: "k3kcli cluster list [command options]",
 		RunE:    list(appCtx),
 		Args:    cobra.NoArgs,
@@ -37,7 +37,7 @@ type GenerateKubeconfigConfig struct {
 func NewKubeconfigCmd(appCtx *AppContext) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "kubeconfig",
-		Short: "Manage kubeconfig for clusters",
+		Short: "Manage kubeconfig for clusters.",
 	}
 
 	cmd.AddCommand(
@@ -52,7 +52,7 @@ func NewKubeconfigGenerateCmd(appCtx *AppContext) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "generate",
-		Short: "Generate kubeconfig for clusters",
+		Short: "Generate kubeconfig for clusters.",
 		RunE:  generate(appCtx, cfg),
 		Args:  cobra.NoArgs,
 	}
@@ -7,7 +7,7 @@ import (
 func NewPolicyCmd(appCtx *AppContext) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "policy",
-		Short: "policy command",
+		Short: "K3k policy command.",
 	}
 
 	cmd.AddCommand(
@@ -30,7 +30,7 @@ func NewPolicyCreateCmd(appCtx *AppContext) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:     "create",
-		Short:   "Create new policy",
+		Short:   "Create a new policy.",
 		Example: "k3kcli policy create [command options] NAME",
 		PreRunE: func(cmd *cobra.Command, args []string) error {
 			switch config.mode {
@@ -150,6 +150,8 @@ func bindPolicyToNamespaces(ctx context.Context, client client.Client, config *V
 	// no old policy, safe to update
 	if oldPolicy == "" {
+		ns.Labels[policy.PolicyNameLabelKey] = policyName
+
 		if err := client.Update(ctx, &ns); err != nil {
 			errs = append(errs, err)
 		} else {
@@ -14,7 +14,7 @@ import (
 func NewPolicyDeleteCmd(appCtx *AppContext) *cobra.Command {
 	return &cobra.Command{
 		Use:     "delete",
-		Short:   "Delete an existing policy",
+		Short:   "Delete an existing policy.",
 		Example: "k3kcli policy delete [command options] NAME",
 		RunE:    policyDeleteAction(appCtx),
 		Args:    cobra.ExactArgs(1),
@@ -15,7 +15,7 @@ import (
 func NewPolicyListCmd(appCtx *AppContext) *cobra.Command {
 	return &cobra.Command{
 		Use:     "list",
-		Short:   "List all the existing policies",
+		Short:   "List all existing policies.",
 		Example: "k3kcli policy list [command options]",
 		RunE:    policyList(appCtx),
 		Args:    cobra.NoArgs,
@@ -36,7 +36,7 @@ func NewRootCmd() *cobra.Command {
 	rootCmd := &cobra.Command{
 		SilenceUsage: true,
 		Use:          "k3kcli",
-		Short:        "CLI for K3K",
+		Short:        "CLI for K3K.",
 		Version:      buildinfo.Version,
 		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
 			InitializeConfig(cmd)
@@ -4,7 +4,7 @@ This document provides advanced usage information for k3k, including detailed us
 
 ## Customizing the Cluster Resource
 
-The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crd-docs.md) for the full specs.
+The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crds.md) for the full specs.
 
 **Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/k3kcli.md) documentation for more details.
@@ -115,7 +115,7 @@ The `serverArgs` field allows you to specify additional arguments to be passed t
 
 ## Using the cli
 
-You can check the [k3kcli documentation](./cli/cli-docs.md) for the full specs.
+You can check the [k3kcli documentation](./cli/k3kcli.md) for the full specs.
 
 ### No storage provider:
@@ -104,7 +104,7 @@ Common use cases for administrators leveraging VirtualClusterPolicy include:
 
 The K3k controller actively monitors VirtualClusterPolicy resources and the corresponding Namespace bindings. When a VCP is applied or updated, the controller ensures that the defined configurations are enforced on the relevant virtual clusters and their associated resources within the targeted Namespaces.
 
-For a deep dive into what VirtualClusterPolicy can do, along with more examples, check out the [VirtualClusterPolicy Concepts](./virtualclusterpolicy.md) page. For a full list of all the spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crd-docs.md#virtualclusterpolicy).
+For a deep dive into what VirtualClusterPolicy can do, along with more examples, check out the [VirtualClusterPolicy Concepts](./virtualclusterpolicy.md) page. For a full list of all the spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crds.md#virtualclusterpolicy).
 
 ## Comparison and Trade-offs
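As a concrete illustration of the binding flow described above, a policy can be created and bound to a Namespace with the CLI documented later in this changeset; the names here are placeholders:

```bash
# create a VirtualClusterPolicy allowing shared mode and bind it to a namespace
k3kcli policy create --mode shared --namespace tenant-a my-policy
```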
docs/cli/convert.lua (new file, +25 lines)

@@ -0,0 +1,25 @@

local deleting_see_also = false

function Header(el)
  -- If we hit "SEE ALSO", start deleting and remove the header itself
  if pandoc.utils.stringify(el):upper() == "SEE ALSO" then
    deleting_see_also = true
    return {}
  end
  -- If we hit any other header, stop deleting
  deleting_see_also = false
  return el
end

function BulletList(el)
  if deleting_see_also then
    return {} -- Deletes the list of links
  end
  return el
end

function CodeBlock(el)
  -- Forces the ---- separator
  local content = "----\n" .. el.text .. "\n----\n\n"
  return pandoc.RawBlock('asciidoc', content)
end
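The filter above is meant to be passed to pandoc when converting the cobra-generated Markdown into AsciiDoc. The actual invocation lives in `scripts/generate-cli-docs`, which is not part of this diff; a minimal sketch of the idea (the file paths are assumptions):

```bash
pandoc docs/cli/k3kcli.md \
  --lua-filter docs/cli/convert.lua \
  -f gfm -t asciidoc \
  -o docs/cli/k3kcli.adoc
```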
docs/cli/k3kcli.adoc (new file, +283 lines)

@@ -0,0 +1,283 @@

== k3kcli

CLI for K3K.

=== Options

----
      --debug               Turn on debug logs
  -h, --help                help for k3kcli
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----


== k3kcli cluster

K3k cluster command.

=== Options

----
  -h, --help   help for cluster
----

=== Options inherited from parent commands

----
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----


== k3kcli cluster create

Create a new cluster.

----
k3kcli cluster create [flags]
----

=== Examples

----
k3kcli cluster create [command options] NAME
----

=== Options

----
      --agent-args strings            agents extra arguments
      --agent-envs strings            agents extra Envs
      --agents int                    number of agents
      --annotations stringArray       Annotations to add to the cluster object (e.g. key=value)
      --cluster-cidr string           cluster CIDR
      --custom-certs string           The path for custom certificate directory
  -h, --help                          help for create
      --kubeconfig-server string      override the kubeconfig server host
      --labels stringArray            Labels to add to the cluster object (e.g. key=value)
      --mirror-host-nodes             Mirror Host Cluster Nodes
      --mode string                   k3k mode type (shared, virtual) (default "shared")
  -n, --namespace string              namespace of the k3k cluster
      --persistence-type string       persistence mode for the nodes (dynamic, ephemeral) (default "dynamic")
      --policy string                 The policy to create the cluster in
      --server-args strings           servers extra arguments
      --server-envs strings           servers extra Envs
      --servers int                   number of servers (default 1)
      --service-cidr string           service CIDR
      --storage-class-name string     storage class name for dynamic persistence type
      --storage-request-size string   storage size for dynamic persistence type
      --timeout duration              The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h). (default 3m0s)
      --token string                  token of the cluster
      --version string                k3s version
----

=== Options inherited from parent commands

----
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----


== k3kcli cluster delete

Delete an existing cluster.

----
k3kcli cluster delete [flags]
----

=== Examples

----
k3kcli cluster delete [command options] NAME
----

=== Options

----
  -h, --help               help for delete
      --keep-data          keeps persistence volumes created for the cluster after deletion
  -n, --namespace string   namespace of the k3k cluster
----

=== Options inherited from parent commands

----
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----


== k3kcli cluster list

List all existing clusters.

----
k3kcli cluster list [flags]
----

=== Examples

----
k3kcli cluster list [command options]
----

=== Options

----
  -h, --help               help for list
  -n, --namespace string   namespace of the k3k cluster
----

=== Options inherited from parent commands

----
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----


== k3kcli kubeconfig

Manage kubeconfig for clusters.

=== Options

----
  -h, --help   help for kubeconfig
----

=== Options inherited from parent commands

----
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----


== k3kcli kubeconfig generate

Generate kubeconfig for clusters.

----
k3kcli kubeconfig generate [flags]
----

=== Options

----
      --altNames strings           altNames of the generated certificates for the kubeconfig
      --cn string                  Common name (CN) of the generated certificates for the kubeconfig (default "system:admin")
      --config-name string         the name of the generated kubeconfig file
      --expiration-days int        Expiration date of the certificates used for the kubeconfig (default 365)
  -h, --help                       help for generate
      --kubeconfig-server string   override the kubeconfig server host
      --name string                cluster name
  -n, --namespace string           namespace of the k3k cluster
      --org strings                Organization name (ORG) of the generated certificates for the kubeconfig
----

=== Options inherited from parent commands

----
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----


== k3kcli policy

K3k policy command.

=== Options

----
  -h, --help   help for policy
----

=== Options inherited from parent commands

----
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----


== k3kcli policy create

Create a new policy.

----
k3kcli policy create [flags]
----

=== Examples

----
k3kcli policy create [command options] NAME
----

=== Options

----
      --annotations stringArray   Annotations to add to the policy object (e.g. key=value)
  -h, --help                      help for create
      --labels stringArray        Labels to add to the policy object (e.g. key=value)
      --mode string               The allowed mode type of the policy (default "shared")
      --namespace strings         The namespaces where to bind the policy
      --overwrite                 Overwrite namespace binding of existing policy
----

=== Options inherited from parent commands

----
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----


== k3kcli policy delete

Delete an existing policy.

----
k3kcli policy delete [flags]
----

=== Examples

----
k3kcli policy delete [command options] NAME
----

=== Options

----
  -h, --help   help for delete
----

=== Options inherited from parent commands

----
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----


== k3kcli policy list

List all existing policies.

----
k3kcli policy list [flags]
----

=== Examples

----
k3kcli policy list [command options]
----

=== Options

----
  -h, --help   help for list
----

=== Options inherited from parent commands

----
      --debug               Turn on debug logs
      --kubeconfig string   kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)
----
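Putting the documented commands together, a typical session might look like the following; the cluster and namespace names are placeholders:

```bash
# create a shared-mode cluster with one server
k3kcli cluster create --mode shared --servers 1 -n tenant-a demo

# generate a kubeconfig for it
k3kcli kubeconfig generate --name demo -n tenant-a

# clean up, keeping the persistent volumes
k3kcli cluster delete --keep-data -n tenant-a demo
```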
@@ -1,6 +1,6 @@
 ## k3kcli
 
-CLI for K3K
+CLI for K3K.
 
 ### Options
@@ -12,7 +12,7 @@ CLI for K3K
 
 ### SEE ALSO
 
-* [k3kcli cluster](k3kcli_cluster.md) - cluster command
-* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters
-* [k3kcli policy](k3kcli_policy.md) - policy command
+* [k3kcli cluster](k3kcli_cluster.md) - K3k cluster command.
+* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters.
+* [k3kcli policy](k3kcli_policy.md) - K3k policy command.
@@ -1,6 +1,6 @@
 ## k3kcli cluster
 
-cluster command
+K3k cluster command.
 
 ### Options
@@ -17,8 +17,8 @@ cluster command
 
 ### SEE ALSO
 
-* [k3kcli](k3kcli.md) - CLI for K3K
-* [k3kcli cluster create](k3kcli_cluster_create.md) - Create new cluster
-* [k3kcli cluster delete](k3kcli_cluster_delete.md) - Delete an existing cluster
-* [k3kcli cluster list](k3kcli_cluster_list.md) - List all the existing cluster
+* [k3kcli](k3kcli.md) - CLI for K3K.
+* [k3kcli cluster create](k3kcli_cluster_create.md) - Create a new cluster.
+* [k3kcli cluster delete](k3kcli_cluster_delete.md) - Delete an existing cluster.
+* [k3kcli cluster list](k3kcli_cluster_list.md) - List all existing clusters.
@@ -1,6 +1,6 @@
 ## k3kcli cluster create
 
-Create new cluster
+Create a new cluster.
 
 ```
 k3kcli cluster create [flags]
@@ -49,5 +49,5 @@ k3kcli cluster create [command options] NAME
 
 ### SEE ALSO
 
-* [k3kcli cluster](k3kcli_cluster.md) - cluster command
+* [k3kcli cluster](k3kcli_cluster.md) - K3k cluster command.
@@ -1,6 +1,6 @@
 ## k3kcli cluster delete
 
-Delete an existing cluster
+Delete an existing cluster.
 
 ```
 k3kcli cluster delete [flags]
@@ -29,5 +29,5 @@ k3kcli cluster delete [command options] NAME
 
 ### SEE ALSO
 
-* [k3kcli cluster](k3kcli_cluster.md) - cluster command
+* [k3kcli cluster](k3kcli_cluster.md) - K3k cluster command.
@@ -1,6 +1,6 @@
 ## k3kcli cluster list
 
-List all the existing cluster
+List all existing clusters.
 
 ```
 k3kcli cluster list [flags]
@@ -28,5 +28,5 @@ k3kcli cluster list [command options]
 
 ### SEE ALSO
 
-* [k3kcli cluster](k3kcli_cluster.md) - cluster command
+* [k3kcli cluster](k3kcli_cluster.md) - K3k cluster command.
@@ -1,6 +1,6 @@
 ## k3kcli kubeconfig
 
-Manage kubeconfig for clusters
+Manage kubeconfig for clusters.
 
 ### Options
@@ -17,6 +17,6 @@ Manage kubeconfig for clusters
 
 ### SEE ALSO
 
-* [k3kcli](k3kcli.md) - CLI for K3K
-* [k3kcli kubeconfig generate](k3kcli_kubeconfig_generate.md) - Generate kubeconfig for clusters
+* [k3kcli](k3kcli.md) - CLI for K3K.
+* [k3kcli kubeconfig generate](k3kcli_kubeconfig_generate.md) - Generate kubeconfig for clusters.
@@ -1,6 +1,6 @@
 ## k3kcli kubeconfig generate
 
-Generate kubeconfig for clusters
+Generate kubeconfig for clusters.
 
 ```
 k3kcli kubeconfig generate [flags]
@@ -29,5 +29,5 @@ k3kcli kubeconfig generate [flags]
 
 ### SEE ALSO
 
-* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters
+* [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters.
@@ -1,6 +1,6 @@
 ## k3kcli policy
 
-policy command
+K3k policy command.
 
 ### Options
@@ -17,8 +17,8 @@ policy command
 
 ### SEE ALSO
 
-* [k3kcli](k3kcli.md) - CLI for K3K
-* [k3kcli policy create](k3kcli_policy_create.md) - Create new policy
-* [k3kcli policy delete](k3kcli_policy_delete.md) - Delete an existing policy
-* [k3kcli policy list](k3kcli_policy_list.md) - List all the existing policies
+* [k3kcli](k3kcli.md) - CLI for K3K.
+* [k3kcli policy create](k3kcli_policy_create.md) - Create a new policy.
+* [k3kcli policy delete](k3kcli_policy_delete.md) - Delete an existing policy.
+* [k3kcli policy list](k3kcli_policy_list.md) - List all existing policies.
@@ -1,6 +1,6 @@
 ## k3kcli policy create
 
-Create new policy
+Create a new policy.
 
 ```
 k3kcli policy create [flags]
@@ -32,5 +32,5 @@ k3kcli policy create [command options] NAME
 
 ### SEE ALSO
 
-* [k3kcli policy](k3kcli_policy.md) - policy command
+* [k3kcli policy](k3kcli_policy.md) - K3k policy command.
@@ -1,6 +1,6 @@
 ## k3kcli policy delete
 
-Delete an existing policy
+Delete an existing policy.
 
 ```
 k3kcli policy delete [flags]
@@ -27,5 +27,5 @@ k3kcli policy delete [command options] NAME
 
 ### SEE ALSO
 
-* [k3kcli policy](k3kcli_policy.md) - policy command
+* [k3kcli policy](k3kcli_policy.md) - K3k policy command.
@@ -1,6 +1,6 @@
 ## k3kcli policy list
 
-List all the existing policies
+List all existing policies.
 
 ```
 k3kcli policy list [flags]
@@ -27,5 +27,5 @@ k3kcli policy list [command options]
 
 ### SEE ALSO
 
-* [k3kcli policy](k3kcli_policy.md) - policy command
+* [k3kcli policy](k3kcli_policy.md) - K3k policy command.
@@ -1,8 +1,8 @@
 processor:
   # RE2 regular expressions describing type fields that should be excluded from the generated documentation.
   ignoreFields:
-    - "status$"
-    - "TypeMeta$"
+    - "status$"
+    - "TypeMeta$"
 
 render:
   # Version of Kubernetes to use when generating links to Kubernetes API documentation.
645
docs/crds/crds.adoc
Normal file
645
docs/crds/crds.adoc
Normal file
@@ -0,0 +1,645 @@
|
||||
[id="k3k-api-reference"]
|
||||
= API Reference
|
||||
:revdate: "2006-01-02"
|
||||
:page-revdate: {revdate}
|
||||
:anchor_prefix: k8s-api
|
||||
|
||||
== Packages
|
||||
- xref:{anchor_prefix}-k3k-io-v1beta1[$$k3k.io/v1beta1$$]
|
||||
|
||||
|
||||
[id="{anchor_prefix}-k3k-io-v1beta1"]
|
||||
== k3k.io/v1beta1
|
||||
|
||||
|
||||
=== Resource Types
|
||||
- xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster[$$Cluster$$]
|
||||
- xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterlist[$$ClusterList$$]
|
||||
- xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicy[$$VirtualClusterPolicy$$]
|
||||
- xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicylist[$$VirtualClusterPolicyList$$]
|
||||
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-addon"]
|
||||
=== Addon
|
||||
|
||||
|
||||
|
||||
Addon specifies a Secret containing YAML to be deployed on cluster startup.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`secretNamespace`* __string__ | SecretNamespace is the namespace of the Secret. + | |
|
||||
| *`secretRef`* __string__ | SecretRef is the name of the Secret. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster"]
|
||||
=== Cluster
|
||||
|
||||
|
||||
|
||||
Cluster defines a virtual Kubernetes cluster managed by k3k.
|
||||
It specifies the desired state of a virtual cluster, including version, node configuration, and networking.
|
||||
k3k uses this to provision and manage these virtual clusters.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterlist[$$ClusterList$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`apiVersion`* __string__ | `k3k.io/v1beta1` | |
|
||||
| *`kind`* __string__ | `Cluster` | |
|
||||
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
|
||||
| |
|
||||
| *`spec`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]__ | Spec defines the desired state of the Cluster. + | { } |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterlist"]
|
||||
=== ClusterList
|
||||
|
||||
|
||||
|
||||
ClusterList is a list of Cluster resources.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`apiVersion`* __string__ | `k3k.io/v1beta1` | |
|
||||
| *`kind`* __string__ | `ClusterList` | |
|
||||
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta[$$ListMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
|
||||
| |
|
||||
| *`items`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster[$$Cluster$$] array__ | | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clustermode"]
|
||||
=== ClusterMode
|
||||
|
||||
_Underlying type:_ _string_
|
||||
|
||||
ClusterMode is the possible provisioning mode of a Cluster.
|
||||
|
||||
_Validation:_
|
||||
- Enum: [shared virtual]
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]
|
||||
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterphase"]
|
||||
=== ClusterPhase
|
||||
|
||||
_Underlying type:_ _string_
|
||||
|
||||
ClusterPhase is a high-level summary of the cluster's current lifecycle state.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterstatus[$$ClusterStatus$$]
|
||||
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec"]
|
||||
=== ClusterSpec
|
||||
|
||||
|
||||
|
||||
ClusterSpec defines the desired state of a virtual Kubernetes cluster.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-cluster[$$Cluster$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`version`* __string__ | Version is the K3s version to use for the virtual nodes. +
|
||||
It should follow the K3s versioning convention (e.g., v1.28.2-k3s1). +
|
||||
If not specified, the Kubernetes version of the host node will be used. + | |
|
||||
| *`mode`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clustermode[$$ClusterMode$$]__ | Mode specifies the cluster provisioning mode: "shared" or "virtual". +
|
||||
Defaults to "shared". This field is immutable. + | shared | Enum: [shared virtual] +
|
||||
|
||||
| *`servers`* __integer__ | Servers specifies the number of K3s pods to run in server (control plane) mode. +
|
||||
Must be at least 1. Defaults to 1. + | 1 |
|
||||
| *`agents`* __integer__ | Agents specifies the number of K3s pods to run in agent (worker) mode. +
|
||||
Must be 0 or greater. Defaults to 0. +
|
||||
This field is ignored in "shared" mode. + | 0 |
|
||||
| *`clusterCIDR`* __string__ | ClusterCIDR is the CIDR range for pod IPs. +
|
||||
Defaults to 10.42.0.0/16 in shared mode and 10.52.0.0/16 in virtual mode. +
|
||||
This field is immutable. + | |
|
||||
| *`serviceCIDR`* __string__ | ServiceCIDR is the CIDR range for service IPs. +
|
||||
Defaults to 10.43.0.0/16 in shared mode and 10.53.0.0/16 in virtual mode. +
|
||||
This field is immutable. + | |
|
||||
| *`clusterDNS`* __string__ | ClusterDNS is the IP address for the CoreDNS service. +
|
||||
Must be within the ServiceCIDR range. Defaults to 10.43.0.10. +
|
||||
This field is immutable. + | |
|
||||
| *`persistence`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistenceconfig[$$PersistenceConfig$$]__ | Persistence specifies options for persisting etcd data. +
|
||||
Defaults to dynamic persistence, which uses a PersistentVolumeClaim to provide data persistence. +
|
||||
A default StorageClass is required for dynamic persistence. + | |
|
||||
| *`expose`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-exposeconfig[$$ExposeConfig$$]__ | Expose specifies options for exposing the API server. +
|
||||
By default, it's only exposed as a ClusterIP. + | |
|
||||
| *`nodeSelector`* __object (keys:string, values:string)__ | NodeSelector specifies node labels to constrain where server/agent pods are scheduled. +
|
||||
In "shared" mode, this also applies to workloads. + | |
|
||||
| *`priorityClass`* __string__ | PriorityClass specifies the priorityClassName for server/agent pods. +
|
||||
In "shared" mode, this also applies to workloads. + | |
|
||||
| *`tokenSecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#secretreference-v1-core[$$SecretReference$$]__ | TokenSecretRef is a Secret reference containing the token used by worker nodes to join the cluster. +
|
||||
The Secret must have a "token" field in its data. + | |
|
||||
| *`tlsSANs`* __string array__ | TLSSANs specifies subject alternative names for the K3s server certificate. + | |
|
||||
| *`serverArgs`* __string array__ | ServerArgs specifies ordered key-value pairs for K3s server pods. +
|
||||
Example: ["--tls-san=example.com"] + | |
|
||||
| *`agentArgs`* __string array__ | AgentArgs specifies ordered key-value pairs for K3s agent pods. +
|
||||
Example: ["--node-name=my-agent-node"] + | |
|
||||
| *`serverEnvs`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#envvar-v1-core[$$EnvVar$$] array__ | ServerEnvs specifies list of environment variables to set in the server pod. + | |
|
||||
| *`agentEnvs`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#envvar-v1-core[$$EnvVar$$] array__ | AgentEnvs specifies list of environment variables to set in the agent pod. + | |
|
||||
| *`addons`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-addon[$$Addon$$] array__ | Addons specifies secrets containing raw YAML to deploy on cluster startup. + | |
|
||||
| *`serverLimit`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core[$$ResourceList$$]__ | ServerLimit specifies resource limits for server nodes. + | |
|
||||
| *`workerLimit`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcelist-v1-core[$$ResourceList$$]__ | WorkerLimit specifies resource limits for agent nodes. + | |
|
||||
| *`mirrorHostNodes`* __boolean__ | MirrorHostNodes controls whether node objects from the host cluster +
|
||||
are mirrored into the virtual cluster. + | |
|
||||
| *`customCAs`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-customcas[$$CustomCAs$$]__ | CustomCAs specifies the cert/key pairs for custom CA certificates. + | |
|
||||
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. + | { } |
|
||||
|===
|
||||
|
||||
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-configmapsyncconfig"]
|
||||
=== ConfigMapSyncConfig
|
||||
|
||||
|
||||
|
||||
ConfigMapSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | true |
|
||||
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
|
||||
then all resources of the given type will be synced. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource"]
|
||||
=== CredentialSource
|
||||
|
||||
|
||||
|
||||
CredentialSource defines where to get a credential from.
|
||||
It can represent either a TLS key pair or a single private key.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsources[$$CredentialSources$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`secretName`* __string__ | SecretName specifies the name of an existing secret to use. +
|
||||
The controller expects specific keys inside based on the credential type: +
|
||||
- For TLS pairs (e.g., ServerCA): 'tls.crt' and 'tls.key'. +
|
||||
- For ServiceAccountTokenKey: 'tls.key'. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsources"]
|
||||
=== CredentialSources
|
||||
|
||||
|
||||
|
||||
CredentialSources lists all the required credentials, including both
|
||||
TLS key pairs and single signing keys.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-customcas[$$CustomCAs$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`serverCA`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | ServerCA specifies the server-ca cert/key pair. + | |
|
||||
| *`clientCA`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | ClientCA specifies the client-ca cert/key pair. + | |
|
||||
| *`requestHeaderCA`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | RequestHeaderCA specifies the request-header-ca cert/key pair. + | |
|
||||
| *`etcdServerCA`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | ETCDServerCA specifies the etcd-server-ca cert/key pair. + | |
|
||||
| *`etcdPeerCA`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | ETCDPeerCA specifies the etcd-peer-ca cert/key pair. + | |
|
||||
| *`serviceAccountToken`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsource[$$CredentialSource$$]__ | ServiceAccountToken specifies the service-account-token key. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-customcas"]
|
||||
=== CustomCAs
|
||||
|
||||
|
||||
|
||||
CustomCAs specifies the cert/key pairs for custom CA certificates.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`enabled`* __boolean__ | Enabled toggles this feature on or off. + | true |
|
||||
| *`sources`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-credentialsources[$$CredentialSources$$]__ | Sources defines the sources for all required custom CA certificates. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-exposeconfig"]
|
||||
=== ExposeConfig
|
||||
|
||||
|
||||
|
||||
ExposeConfig specifies options for exposing the API server.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`ingress`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingressconfig[$$IngressConfig$$]__ | Ingress specifies options for exposing the API server through an Ingress. + | |
|
||||
| *`loadBalancer`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-loadbalancerconfig[$$LoadBalancerConfig$$]__ | LoadBalancer specifies options for exposing the API server through a LoadBalancer service. + | |
|
||||
| *`nodePort`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-nodeportconfig[$$NodePortConfig$$]__ | NodePort specifies options for exposing the API server through NodePort. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingressconfig"]
|
||||
=== IngressConfig
|
||||
|
||||
|
||||
|
||||
IngressConfig specifies options for exposing the API server through an Ingress.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-exposeconfig[$$ExposeConfig$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`annotations`* __object (keys:string, values:string)__ | Annotations specifies annotations to add to the Ingress. + | |
|
||||
| *`ingressClassName`* __string__ | IngressClassName specifies the IngressClass to use for the Ingress. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingresssyncconfig"]
|
||||
=== IngressSyncConfig
|
||||
|
||||
|
||||
|
||||
IngressSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | false |
|
||||
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
|
||||
then all resources of the given type will be synced. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-loadbalancerconfig"]
|
||||
=== LoadBalancerConfig
|
||||
|
||||
|
||||
|
||||
LoadBalancerConfig specifies options for exposing the API server through a LoadBalancer service.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-exposeconfig[$$ExposeConfig$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`serverPort`* __integer__ | ServerPort is the port on which the K3s server is exposed when type is LoadBalancer. +
|
||||
If not specified, the default https 443 port will be allocated. +
|
||||
If 0 or negative, the port will not be exposed. + | |
|
||||
| *`etcdPort`* __integer__ | ETCDPort is the port on which the ETCD service is exposed when type is LoadBalancer. +
|
||||
If not specified, the default etcd 2379 port will be allocated. +
|
||||
If 0 or negative, the port will not be exposed. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-nodeportconfig"]
|
||||
=== NodePortConfig
|
||||
|
||||
|
||||
|
||||
NodePortConfig specifies options for exposing the API server through NodePort.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-exposeconfig[$$ExposeConfig$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`serverPort`* __integer__ | ServerPort is the port on each node on which the K3s server is exposed when type is NodePort. +
|
||||
If not specified, a random port between 30000-32767 will be allocated. +
|
||||
If out of range, the port will not be exposed. + | |
|
||||
| *`etcdPort`* __integer__ | ETCDPort is the port on each node on which the ETCD service is exposed when type is NodePort. +
|
||||
If not specified, a random port between 30000-32767 will be allocated. +
|
||||
If out of range, the port will not be exposed. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistenceconfig"]
|
||||
=== PersistenceConfig
|
||||
|
||||
|
||||
|
||||
PersistenceConfig specifies options for persisting etcd data.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`type`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistencemode[$$PersistenceMode$$]__ | Type specifies the persistence mode. + | dynamic |
|
||||
| *`storageClassName`* __string__ | StorageClassName is the name of the StorageClass to use for the PVC. +
|
||||
This field is only relevant in "dynamic" mode. + | |
|
||||
| *`storageRequestSize`* __string__ | StorageRequestSize is the requested size for the PVC. +
|
||||
This field is only relevant in "dynamic" mode. + | 2G |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistencemode"]
|
||||
=== PersistenceMode
|
||||
|
||||
_Underlying type:_ _string_
|
||||
|
||||
PersistenceMode is the storage mode of a Cluster.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistenceconfig[$$PersistenceConfig$$]
|
||||
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistentvolumeclaimsyncconfig"]
|
||||
=== PersistentVolumeClaimSyncConfig
|
||||
|
||||
|
||||
|
||||
PersistentVolumeClaimSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | true |
|
||||
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
|
||||
then all resources of the given type will be synced. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-podsecurityadmissionlevel"]
|
||||
=== PodSecurityAdmissionLevel
|
||||
|
||||
_Underlying type:_ _string_
|
||||
|
||||
PodSecurityAdmissionLevel is the policy level applied to the pods in the namespace.
|
||||
|
||||
_Validation:_
|
||||
- Enum: [privileged baseline restricted]
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]
|
||||
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-priorityclasssyncconfig"]
|
||||
=== PriorityClassSyncConfig
|
||||
|
||||
|
||||
|
||||
PriorityClassSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | false |
|
||||
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
|
||||
then all resources of the given type will be synced. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-secretsyncconfig"]
|
||||
=== SecretSyncConfig
|
||||
|
||||
|
||||
|
||||
SecretSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | true |
|
||||
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
|
||||
then all resources of the given type will be synced. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-servicesyncconfig"]
|
||||
=== ServiceSyncConfig
|
||||
|
||||
|
||||
|
||||
ServiceSyncConfig specifies the sync options for services.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`enabled`* __boolean__ | Enabled is an on/off switch for syncing resources. + | true |
|
||||
| *`selector`* __object (keys:string, values:string)__ | Selector specifies set of labels of the resources that will be synced, if empty +
|
||||
then all resources of the given type will be synced. + | |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig"]
|
||||
=== SyncConfig
|
||||
|
||||
|
||||
|
||||
SyncConfig will contain the resources that should be synced from virtual cluster to host cluster.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clusterspec[$$ClusterSpec$$]
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]
|
||||
|
||||
[cols="25a,55a,10a,10a", options="header"]
|
||||
|===
|
||||
| Field | Description | Default | Validation
|
||||
| *`services`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-servicesyncconfig[$$ServiceSyncConfig$$]__ | Services resources sync configuration. + | { enabled:true } |
|
||||
| *`configMaps`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-configmapsyncconfig[$$ConfigMapSyncConfig$$]__ | ConfigMaps resources sync configuration. + | { enabled:true } |
|
||||
| *`secrets`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-secretsyncconfig[$$SecretSyncConfig$$]__ | Secrets resources sync configuration. + | { enabled:true } |
|
||||
| *`ingresses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-ingresssyncconfig[$$IngressSyncConfig$$]__ | Ingresses resources sync configuration. + | { enabled:false } |
|
||||
| *`persistentVolumeClaims`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-persistentvolumeclaimsyncconfig[$$PersistentVolumeClaimSyncConfig$$]__ | PersistentVolumeClaims resources sync configuration. + | { enabled:true } |
|
||||
| *`priorityClasses`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-priorityclasssyncconfig[$$PriorityClassSyncConfig$$]__ | PriorityClasses resources sync configuration. + | { enabled:false } |
|
||||
|===
|
||||
|
||||
|
||||
[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicy"]
|
||||
=== VirtualClusterPolicy
|
||||
|
||||
|
||||
|
||||
VirtualClusterPolicy allows defining common configurations and constraints
|
||||
for clusters within a clusterpolicy.
|
||||
|
||||
|
||||
|
||||
_Appears In:_
|
||||
|
||||
* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicylist[$$VirtualClusterPolicyList$$]

[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`apiVersion`* __string__ | `k3k.io/v1beta1` | |
| *`kind`* __string__ | `VirtualClusterPolicy` | |
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
| |
| *`spec`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec[$$VirtualClusterPolicySpec$$]__ | Spec defines the desired state of the VirtualClusterPolicy. + | { } |
|===


[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicylist"]
=== VirtualClusterPolicyList

VirtualClusterPolicyList is a list of VirtualClusterPolicy resources.

[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`apiVersion`* __string__ | `k3k.io/v1beta1` | |
| *`kind`* __string__ | `VirtualClusterPolicyList` | |
| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta[$$ListMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`.
| |
| *`items`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicy[$$VirtualClusterPolicy$$] array__ | | |
|===


[id="{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicyspec"]
=== VirtualClusterPolicySpec

VirtualClusterPolicySpec defines the desired state of a VirtualClusterPolicy.

_Appears In:_

* xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-virtualclusterpolicy[$$VirtualClusterPolicy$$]

[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
| *`quota`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#resourcequotaspec-v1-core[$$ResourceQuotaSpec$$]__ | Quota specifies the resource limits for clusters within a clusterpolicy. + | |
| *`limit`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#limitrangespec-v1-core[$$LimitRangeSpec$$]__ | Limit specifies the LimitRange that will be applied to all pods within the VirtualClusterPolicy +
to set defaults and constraints (min/max) + | |
| *`defaultNodeSelector`* __object (keys:string, values:string)__ | DefaultNodeSelector specifies the node selector that applies to all clusters (server + agent) in the target Namespace. + | |
| *`defaultPriorityClass`* __string__ | DefaultPriorityClass specifies the priorityClassName applied to all pods of all clusters in the target Namespace. + | |
| *`allowedMode`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-clustermode[$$ClusterMode$$]__ | AllowedMode specifies the allowed cluster provisioning mode. Defaults to "shared". + | shared | Enum: [shared virtual] +
| *`disableNetworkPolicy`* __boolean__ | DisableNetworkPolicy indicates whether to disable the creation of a default network policy for cluster isolation. + | |
| *`podSecurityAdmissionLevel`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-podsecurityadmissionlevel[$$PodSecurityAdmissionLevel$$]__ | PodSecurityAdmissionLevel specifies the pod security admission level applied to the pods in the namespace. + | | Enum: [privileged baseline restricted] +
| *`sync`* __xref:{anchor_prefix}-github-com-rancher-k3k-pkg-apis-k3k-io-v1beta1-syncconfig[$$SyncConfig$$]__ | Sync specifies the resources types that will be synced from virtual cluster to host cluster. + | { } |
|===
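For orientation, here is a minimal sketch of a `VirtualClusterPolicy` object built from the fields documented above. This is an illustration only: the Go struct field names are assumed from the JSON names in the table (e.g. `allowedMode` becomes `AllowedMode`), and the priority class name is made up for the example; the `v1beta1` package is the authoritative reference.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
)

func main() {
	// Sketch: only fields whose types are unambiguous in the table are set.
	policy := v1beta1.VirtualClusterPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "example-policy"},
		Spec: v1beta1.VirtualClusterPolicySpec{
			// Only "shared" and "virtual" are accepted; defaults to "shared".
			AllowedMode: "shared",
			// Applied to server and agent pods of all clusters in the target Namespace.
			DefaultNodeSelector:  map[string]string{"kubernetes.io/os": "linux"},
			DefaultPriorityClass: "k3k-low-priority", // hypothetical name
		},
	}

	fmt.Printf("policy %q allows mode %q\n", policy.Name, string(policy.Spec.AllowedMode))
}
```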
19
docs/crds/templates/asciidoctor/gv_details.tpl
Normal file
@@ -0,0 +1,19 @@
{{- define "gvDetails" -}}
{{- $gv := . -}}
[id="{{ asciidocGroupVersionID $gv | asciidocRenderAnchorID }}"]
== {{ $gv.GroupVersionString }}

{{ $gv.Doc }}

{{- if $gv.Kinds }}
=== Resource Types
{{- range $gv.SortedKinds }}
- {{ $gv.TypeForKind . | asciidocRenderTypeLink }}
{{- end }}
{{ end }}

{{ range $gv.SortedTypes }}
{{ template "type" . }}
{{ end }}

{{- end -}}
19
docs/crds/templates/asciidoctor/gv_list.tpl
Normal file
@@ -0,0 +1,19 @@
{{- define "gvList" -}}
{{- $groupVersions := . -}}

[id="k3k-api-reference"]
= API Reference
:revdate: "2006-01-02"
:page-revdate: {revdate}
:anchor_prefix: k8s-api

== Packages
{{- range $groupVersions }}
- {{ asciidocRenderGVLink . }}
{{- end }}

{{ range $groupVersions }}
{{ template "gvDetails" . }}
{{ end }}

{{- end -}}
43
docs/crds/templates/asciidoctor/type.tpl
Normal file
@@ -0,0 +1,43 @@
{{- define "type" -}}
{{- $type := . -}}
{{- if asciidocShouldRenderType $type -}}

[id="{{ asciidocTypeID $type | asciidocRenderAnchorID }}"]
=== {{ $type.Name }}

{{ if $type.IsAlias }}_Underlying type:_ _{{ asciidocRenderTypeLink $type.UnderlyingType }}_{{ end }}

{{ $type.Doc }}

{{ if $type.Validation -}}
_Validation:_
{{- range $type.Validation }}
- {{ . }}
{{- end }}
{{- end }}

{{ if $type.References -}}
_Appears In:_
{{ range $type.SortedReferences }}
* {{ asciidocRenderTypeLink . }}
{{- end }}
{{- end }}

{{ if $type.Members -}}
[cols="25a,55a,10a,10a", options="header"]
|===
| Field | Description | Default | Validation
{{ if $type.GVK -}}
| *`apiVersion`* __string__ | `{{ $type.GVK.Group }}/{{ $type.GVK.Version }}` | |
| *`kind`* __string__ | `{{ $type.GVK.Kind }}` | |
{{ end -}}

{{ range $type.Members -}}
| *`{{ .Name }}`* __{{ asciidocRenderType .Type }}__ | {{ template "type_members" . }} | {{ .Default }} | {{ range .Validation -}} {{ asciidocRenderValidation . }} +
{{ end }}
{{ end -}}
|===
{{ end -}}

{{- end -}}
{{- end -}}
8
docs/crds/templates/asciidoctor/type_members.tpl
Normal file
@@ -0,0 +1,8 @@
{{- define "type_members" -}}
{{- $field := . -}}
{{- if eq $field.Name "metadata" -}}
Refer to Kubernetes API documentation for fields of `metadata`.
{{ else -}}
{{ asciidocRenderFieldDoc $field.Doc }}
{{- end -}}
{{- end -}}
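The four `.tpl` files above are plain Go `text/template` definitions that crd-ref-docs loads when rendering the AsciiDoc reference: `gvList` iterates the group versions, `gvDetails` renders one group, `type` renders a type's field table, and `type_members` renders a single field cell. As a rough, self-contained illustration of how such named templates compose (this is not crd-ref-docs itself, only the standard-library mechanism it relies on):

```go
package main

import (
	"os"
	"text/template"
)

// Two named templates, analogous to "type" calling "type_members" above.
const tpl = `
{{- define "field" -}}
| {{ .Name }} | {{ .Doc }}
{{- end -}}

{{- define "typeTable" -}}
|===
{{ range . }}{{ template "field" . }}
{{ end }}|===
{{- end -}}
`

type field struct {
	Name string
	Doc  string
}

func main() {
	t := template.Must(template.New("docs").Parse(tpl))

	fields := []field{{Name: "spec", Doc: "Spec defines the desired state."}}

	// Executing a named template is what the renderer does with "gvList".
	if err := t.ExecuteTemplate(os.Stdout, "typeTable", fields); err != nil {
		panic(err)
	}
}
```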
@@ -3,8 +3,8 @@
This guide walks through the various ways to create and manage virtual clusters in K3K. We'll cover common use cases using both the **Custom Resource Definitions (CRDs)** and the **K3K CLI**, so you can choose the method that fits your workflow.

> 📘 For full reference:
> - [CRD Reference Documentation](../crds/crd-docs.md)
> - [CLI Reference Documentation](../cli/cli-docs.md)
> - [CRD Reference Documentation](../crds/crds.md)
> - [CLI Reference Documentation](../cli/k3kcli.md)
> - [Full example](../advanced-usage.md)

> [!NOTE]
@@ -143,5 +143,5 @@ spec:

## Further Reading

* For a complete reference of all `VirtualClusterPolicy` spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crd-docs.md#virtualclusterpolicy).
* For a complete reference of all `VirtualClusterPolicy` spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crds.md#virtualclusterpolicy).
* To understand how VCPs fit into the overall K3k system, see the [Architecture](./architecture.md) document.
@@ -5,6 +5,7 @@ import (

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/component-helpers/storage/volume"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
@@ -12,6 +13,7 @@ import (

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"

@@ -22,6 +24,7 @@ import (
const (
	pvcControllerName = "pvc-syncer-controller"
	pvcFinalizerName  = "pvc.k3k.io/finalizer"
	pseudoPVLabel     = "pod.k3k.io/pseudoPV"
)

type PVCReconciler struct {
@@ -105,6 +108,12 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r
		if err := r.HostClient.Delete(ctx, syncedPVC); err != nil && !apierrors.IsNotFound(err) {
			return reconcile.Result{}, err
		}

		// delete the synced virtual PV
		if err := r.VirtualClient.Delete(ctx, newPersistentVolume(&virtPVC)); err != nil && !apierrors.IsNotFound(err) {
			return reconcile.Result{}, err
		}

		// remove the finalizer after cleaning up the synced pvc
		if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) {
			if err := r.VirtualClient.Update(ctx, &virtPVC); err != nil {
@@ -127,7 +136,13 @@ func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (r

	// note that we dont need to update the PVC on the host cluster, only syncing the PVC to allow being
	// handled by the host cluster.
	return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.HostClient.Create(ctx, syncedPVC))
	if err := r.HostClient.Create(ctx, syncedPVC); err != nil && !apierrors.IsAlreadyExists(err) {
		return reconcile.Result{}, err
	}

	// Creating a virtual PV to bound the existing PVC in the virtual cluster - needed for scheduling of
	// the consumer pods
	return reconcile.Result{}, r.createVirtualPersistentVolume(ctx, virtPVC)
}

func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
@@ -136,3 +151,82 @@ func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeC

	return hostPVC
}

func (r *PVCReconciler) createVirtualPersistentVolume(ctx context.Context, pvc v1.PersistentVolumeClaim) error {
	log := ctrl.LoggerFrom(ctx)
	log.V(1).Info("Creating virtual PersistentVolume")

	pv := newPersistentVolume(&pvc)

	if err := r.VirtualClient.Create(ctx, pv); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}

	orig := pv.DeepCopy()
	pv.Status = v1.PersistentVolumeStatus{
		Phase: v1.VolumeBound,
	}

	if err := r.VirtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
		return err
	}

	log.V(1).Info("Patch the status of PersistentVolumeClaim to Bound")

	pvcPatch := pvc.DeepCopy()
	if pvcPatch.Annotations == nil {
		pvcPatch.Annotations = make(map[string]string)
	}

	pvcPatch.Annotations[volume.AnnBoundByController] = "yes"
	pvcPatch.Annotations[volume.AnnBindCompleted] = "yes"
	pvcPatch.Status.Phase = v1.ClaimBound
	pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes

	return r.VirtualClient.Status().Update(ctx, pvcPatch)
}

func newPersistentVolume(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {
	var storageClass string

	if obj.Spec.StorageClassName != nil {
		storageClass = *obj.Spec.StorageClassName
	}

	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: obj.Name,
			Labels: map[string]string{
				pseudoPVLabel: "true",
			},
			Annotations: map[string]string{
				volume.AnnBoundByController:      "true",
				volume.AnnDynamicallyProvisioned: "k3k-kubelet",
			},
		},
		TypeMeta: metav1.TypeMeta{
			Kind:       "PersistentVolume",
			APIVersion: "v1",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FlexVolume: &v1.FlexPersistentVolumeSource{
					Driver: "pseudopv",
				},
			},
			StorageClassName:              storageClass,
			VolumeMode:                    obj.Spec.VolumeMode,
			PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
			AccessModes:                   obj.Spec.AccessModes,
			Capacity:                      obj.Spec.Resources.Requests,
			ClaimRef: &v1.ObjectReference{
				APIVersion:      obj.APIVersion,
				UID:             obj.UID,
				ResourceVersion: obj.ResourceVersion,
				Kind:            obj.Kind,
				Namespace:       obj.Namespace,
				Name:            obj.Name,
			},
		},
	}
}
@@ -55,7 +55,7 @@ var PVCTests = func() {
		Expect(err).NotTo(HaveOccurred())
	})

	It("creates a pvc on the host cluster", func() {
	It("creates a pvc on the host cluster and virtual pv in virtual cluster", func() {
		ctx := context.Background()

		pvc := &v1.PersistentVolumeClaim{
@@ -100,5 +100,11 @@ var PVCTests = func() {
		Expect(*hostPVC.Spec.StorageClassName).To(Equal("test-sc"))

		GinkgoWriter.Printf("labels: %v\n", hostPVC.Labels)

		var virtualPV v1.PersistentVolume
		key := client.ObjectKey{Name: pvc.Name}

		err = virtTestEnv.k8sClient.Get(ctx, key, &virtualPV)
		Expect(err).NotTo(HaveOccurred())
	})
}
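Not part of this change, but since the controller patches the pseudo PV's status to Bound after creating it, a natural follow-up assertion inside the same It block could look like the sketch below, reusing the test's existing `ctx`, `pvc` and `virtTestEnv`:

```go
// Sketch only: wait for the pseudo PV in the virtual cluster to report Bound.
Eventually(func() v1.PersistentVolumePhase {
	var pv v1.PersistentVolume
	_ = virtTestEnv.k8sClient.Get(ctx, client.ObjectKey{Name: pvc.Name}, &pv)

	return pv.Status.Phase
}).Should(Equal(v1.VolumeBound))
```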
@@ -1,215 +0,0 @@
|
||||
package syncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/component-helpers/storage/volume"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
podControllerName = "pod-pvc-controller"
|
||||
pseudoPVLabel = "pod.k3k.io/pseudoPV"
|
||||
)
|
||||
|
||||
type PodReconciler struct {
|
||||
*SyncerContext
|
||||
}
|
||||
|
||||
// AddPodPVCController adds pod controller to k3k-kubelet
|
||||
func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string) error {
|
||||
// initialize a new Reconciler
|
||||
reconciler := PodReconciler{
|
||||
SyncerContext: &SyncerContext{
|
||||
ClusterName: clusterName,
|
||||
ClusterNamespace: clusterNamespace,
|
||||
VirtualClient: virtMgr.GetClient(),
|
||||
HostClient: hostMgr.GetClient(),
|
||||
Translator: translate.ToHostTranslator{},
|
||||
},
|
||||
}
|
||||
|
||||
name := reconciler.Translator.TranslateName(clusterNamespace, podControllerName)
|
||||
|
||||
return ctrl.NewControllerManagedBy(virtMgr).
|
||||
Named(name).
|
||||
For(&v1.Pod{}).
|
||||
WithEventFilter(predicate.NewPredicateFuncs(reconciler.filterResources)).
|
||||
Complete(&reconciler)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) filterResources(object ctrlruntimeclient.Object) bool {
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check for pvc config
|
||||
syncConfig := cluster.Spec.Sync.PersistentVolumeClaims
|
||||
|
||||
// If PVC syncing is disabled, only process deletions to allow for cleanup.
|
||||
return syncConfig.Enabled || object.GetDeletionTimestamp() != nil
|
||||
}
|
||||
|
||||
func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.ClusterName, "clusterNamespace", r.ClusterNamespace)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var (
|
||||
virtPod v1.Pod
|
||||
cluster v1beta1.Cluster
|
||||
)
|
||||
|
||||
if err := r.HostClient.Get(ctx, types.NamespacedName{Name: r.ClusterName, Namespace: r.ClusterNamespace}, &cluster); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil {
|
||||
return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// reconcile pods with pvcs
|
||||
for _, vol := range virtPod.Spec.Volumes {
|
||||
if vol.PersistentVolumeClaim != nil {
|
||||
log.Info("Handling pod with pvc")
|
||||
|
||||
if err := r.reconcilePodWithPVC(ctx, &virtPod, vol.PersistentVolumeClaim); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// reconcilePodWithPVC will make sure to create a fake PV for each PVC for any pod so that it can be scheduled on the virtual-kubelet
|
||||
// and then created on the host, the PV is not synced to the host cluster.
|
||||
func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pvcSource *v1.PersistentVolumeClaimVolumeSource) error {
|
||||
log := ctrl.LoggerFrom(ctx).WithValues("PersistentVolumeClaim", pvcSource.ClaimName)
|
||||
ctx = ctrl.LoggerInto(ctx, log)
|
||||
|
||||
var pvc v1.PersistentVolumeClaim
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: pvcSource.ClaimName,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Get(ctx, key, &pvc); err != nil {
|
||||
return ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
pv := r.pseudoPV(&pvc)
|
||||
|
||||
if pod.DeletionTimestamp != nil {
|
||||
return r.handlePodDeletion(ctx, pv)
|
||||
}
|
||||
|
||||
log.Info("Creating pseudo Persistent Volume")
|
||||
|
||||
if err := r.VirtualClient.Create(ctx, pv); err != nil {
|
||||
return ctrlruntimeclient.IgnoreAlreadyExists(err)
|
||||
}
|
||||
|
||||
orig := pv.DeepCopy()
|
||||
pv.Status = v1.PersistentVolumeStatus{
|
||||
Phase: v1.VolumeBound,
|
||||
}
|
||||
|
||||
if err := r.VirtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("Patch the status of PersistentVolumeClaim to Bound")
|
||||
|
||||
pvcPatch := pvc.DeepCopy()
|
||||
if pvcPatch.Annotations == nil {
|
||||
pvcPatch.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
pvcPatch.Annotations[volume.AnnBoundByController] = "yes"
|
||||
pvcPatch.Annotations[volume.AnnBindCompleted] = "yes"
|
||||
pvcPatch.Status.Phase = v1.ClaimBound
|
||||
pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes
|
||||
|
||||
return r.VirtualClient.Status().Update(ctx, pvcPatch)
|
||||
}
|
||||
|
||||
func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume {
|
||||
var storageClass string
|
||||
|
||||
if obj.Spec.StorageClassName != nil {
|
||||
storageClass = *obj.Spec.StorageClassName
|
||||
}
|
||||
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: obj.Name,
|
||||
Labels: map[string]string{
|
||||
pseudoPVLabel: "true",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
volume.AnnBoundByController: "true",
|
||||
volume.AnnDynamicallyProvisioned: "k3k-kubelet",
|
||||
},
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "PersistentVolume",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
FlexVolume: &v1.FlexPersistentVolumeSource{
|
||||
Driver: "pseudopv",
|
||||
},
|
||||
},
|
||||
StorageClassName: storageClass,
|
||||
VolumeMode: obj.Spec.VolumeMode,
|
||||
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
|
||||
AccessModes: obj.Spec.AccessModes,
|
||||
Capacity: obj.Spec.Resources.Requests,
|
||||
ClaimRef: &v1.ObjectReference{
|
||||
APIVersion: obj.APIVersion,
|
||||
UID: obj.UID,
|
||||
ResourceVersion: obj.ResourceVersion,
|
||||
Kind: obj.Kind,
|
||||
Namespace: obj.Namespace,
|
||||
Name: obj.Name,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (r *PodReconciler) handlePodDeletion(ctx context.Context, pv *v1.PersistentVolume) error {
|
||||
var currentPV v1.PersistentVolume
|
||||
if err := r.VirtualClient.Get(ctx, ctrlruntimeclient.ObjectKeyFromObject(pv), &currentPV); err != nil {
|
||||
return ctrlruntimeclient.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
pvPatch := currentPV.DeepCopy()
|
||||
pvPatch.Spec.ClaimRef = nil
|
||||
pvPatch.Status.Phase = v1.VolumeReleased
|
||||
|
||||
controllerutil.RemoveFinalizer(pvPatch, "kubernetes.io/pv-protection")
|
||||
|
||||
if err := r.VirtualClient.Status().Update(ctx, pvPatch); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctrlruntimeclient.IgnoreNotFound(r.VirtualClient.Delete(ctx, &currentPV))
|
||||
}
|
||||
@@ -242,7 +242,7 @@ func (k *kubelet) start(ctx context.Context) {
	// run the node async so that we can wait for it to be ready in another call

	go func() {
		klog.SetLogger(k.logger)
		klog.SetLogger(k.logger.V(1))

		ctx = log.WithLogger(ctx, klogv2.New(nil))
		if err := k.node.Run(ctx); err != nil {
@@ -458,12 +458,6 @@ func addControllers(ctx context.Context, hostMgr, virtualMgr manager.Manager, c
		return errors.New("failed to add pvc syncer controller: " + err.Error())
	}

	logger.Info("adding pod pvc controller")

	if err := syncer.AddPodPVCController(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
		return errors.New("failed to add pod pvc controller: " + err.Error())
	}

	logger.Info("adding priorityclass controller")

	if err := syncer.AddPriorityClassSyncer(ctx, virtualMgr, hostMgr, c.ClusterName, c.ClusterNamespace); err != nil {
@@ -6,7 +6,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -86,7 +85,7 @@ func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger log
|
||||
CoreClient: coreClient,
|
||||
ClusterNamespace: namespace,
|
||||
ClusterName: name,
|
||||
logger: logger,
|
||||
logger: logger.WithValues("cluster", name),
|
||||
serverIP: serverIP,
|
||||
dnsIP: dnsIP,
|
||||
}
|
||||
@@ -95,8 +94,12 @@ func New(hostConfig rest.Config, hostMgr, virtualMgr manager.Manager, logger log
|
||||
}
|
||||
|
||||
// GetContainerLogs retrieves the logs of a container by name from the provider.
|
||||
func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, containerName string, opts api.ContainerLogOpts) (io.ReadCloser, error) {
|
||||
hostPodName := p.Translator.TranslateName(namespace, podName)
|
||||
func (p *Provider) GetContainerLogs(ctx context.Context, namespace, name, containerName string, opts api.ContainerLogOpts) (io.ReadCloser, error) {
|
||||
hostPodName := p.Translator.TranslateName(namespace, name)
|
||||
|
||||
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "container", containerName)
|
||||
logger.V(1).Info("GetContainerLogs")
|
||||
|
||||
options := corev1.PodLogOptions{
|
||||
Container: containerName,
|
||||
Timestamps: opts.Timestamps,
|
||||
@@ -125,20 +128,27 @@ func (p *Provider) GetContainerLogs(ctx context.Context, namespace, podName, con
|
||||
}
|
||||
|
||||
closer, err := p.CoreClient.Pods(p.ClusterNamespace).GetLogs(hostPodName, &options).Stream(ctx)
|
||||
p.logger.Error(err, fmt.Sprintf("got error when getting logs for %s in %s", hostPodName, p.ClusterNamespace))
|
||||
if err != nil {
|
||||
logger.Error(err, "Error getting logs from container")
|
||||
}
|
||||
|
||||
return closer, err
|
||||
}
|
||||
|
||||
// RunInContainer executes a command in a container in the pod, copying data
|
||||
// between in/out/err and the container's stdin/stdout/stderr.
|
||||
func (p *Provider) RunInContainer(ctx context.Context, namespace, podName, containerName string, cmd []string, attach api.AttachIO) error {
|
||||
hostPodName := p.Translator.TranslateName(namespace, podName)
|
||||
func (p *Provider) RunInContainer(ctx context.Context, namespace, name, containerName string, cmd []string, attach api.AttachIO) error {
|
||||
hostPodName := p.Translator.TranslateName(namespace, name)
|
||||
|
||||
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "container", containerName)
|
||||
logger.V(1).Info("RunInContainer")
|
||||
|
||||
req := p.CoreClient.RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(hostPodName).
|
||||
Namespace(p.ClusterNamespace).
|
||||
SubResource("exec")
|
||||
|
||||
req.VersionedParams(&corev1.PodExecOptions{
|
||||
Container: containerName,
|
||||
Command: cmd,
|
||||
@@ -150,10 +160,11 @@ func (p *Provider) RunInContainer(ctx context.Context, namespace, podName, conta
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
|
||||
if err != nil {
|
||||
logger.Error(err, "Error creating SPDY executor")
|
||||
return err
|
||||
}
|
||||
|
||||
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
if err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdin: attach.Stdin(),
|
||||
Stdout: attach.Stdout(),
|
||||
Stderr: attach.Stderr(),
|
||||
@@ -161,18 +172,28 @@ func (p *Provider) RunInContainer(ctx context.Context, namespace, podName, conta
|
||||
TerminalSizeQueue: &translatorSizeQueue{
|
||||
resizeChan: attach.Resize(),
|
||||
},
|
||||
})
|
||||
}); err != nil {
|
||||
logger.Error(err, "Error while executing command in container")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AttachToContainer attaches to the executing process of a container in the pod, copying data
|
||||
// between in/out/err and the container's stdin/stdout/stderr.
|
||||
func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, containerName string, attach api.AttachIO) error {
|
||||
hostPodName := p.Translator.TranslateName(namespace, podName)
|
||||
func (p *Provider) AttachToContainer(ctx context.Context, namespace, name, containerName string, attach api.AttachIO) error {
|
||||
hostPodName := p.Translator.TranslateName(namespace, name)
|
||||
|
||||
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "container", containerName)
|
||||
logger.V(1).Info("AttachToContainer")
|
||||
|
||||
req := p.CoreClient.RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(hostPodName).
|
||||
Namespace(p.ClusterNamespace).
|
||||
SubResource("attach")
|
||||
|
||||
req.VersionedParams(&corev1.PodAttachOptions{
|
||||
Container: containerName,
|
||||
TTY: attach.TTY(),
|
||||
@@ -183,10 +204,11 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(&p.ClientConfig, http.MethodPost, req.URL())
|
||||
if err != nil {
|
||||
logger.Error(err, "Error creating SPDY executor")
|
||||
return err
|
||||
}
|
||||
|
||||
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
if err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdin: attach.Stdin(),
|
||||
Stdout: attach.Stdout(),
|
||||
Stderr: attach.Stderr(),
|
||||
@@ -194,7 +216,12 @@ func (p *Provider) AttachToContainer(ctx context.Context, namespace, podName, co
|
||||
TerminalSizeQueue: &translatorSizeQueue{
|
||||
resizeChan: attach.Resize(),
|
||||
},
|
||||
})
|
||||
}); err != nil {
|
||||
logger.Error(err, "Error while attaching to container")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetStatsSummary gets the stats for the node, including running pods
|
||||
@@ -203,7 +230,8 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
|
||||
|
||||
nodeList := &corev1.NodeList{}
|
||||
if err := p.CoreClient.RESTClient().Get().Resource("nodes").Do(ctx).Into(nodeList); err != nil {
|
||||
return nil, fmt.Errorf("unable to get nodes of cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
|
||||
p.logger.Error(err, "Unable to get nodes of cluster")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// fetch the stats from all the nodes
|
||||
@@ -221,14 +249,13 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
|
||||
Suffix("stats/summary").
|
||||
DoRaw(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"unable to get stats of node '%s', from cluster %s in namespace %s: %w",
|
||||
n.Name, p.ClusterName, p.ClusterNamespace, err,
|
||||
)
|
||||
p.logger.Error(err, "Unable to get stats/summary from cluster node", "node", n.Name)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats := &stats.Summary{}
|
||||
if err := json.Unmarshal(res, stats); err != nil {
|
||||
p.logger.Error(err, "Error unmarshaling stats/summary from cluster node", "node", n.Name)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -241,6 +268,7 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
|
||||
|
||||
pods, err := p.GetPods(ctx)
|
||||
if err != nil {
|
||||
p.logger.Error(err, "Error getting pods from cluster for stats")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -278,9 +306,12 @@ func (p *Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
|
||||
|
||||
// GetMetricsResource gets the metrics for the node, including running pods
|
||||
func (p *Provider) GetMetricsResource(ctx context.Context) ([]*dto.MetricFamily, error) {
|
||||
p.logger.V(1).Info("GetMetricsResource")
|
||||
|
||||
statsSummary, err := p.GetStatsSummary(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Join(err, errors.New("error fetching MetricsResource"))
|
||||
p.logger.Error(err, "Error getting stats summary from cluster for metrics")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
registry := compbasemetrics.NewKubeRegistry()
|
||||
@@ -288,15 +319,20 @@ func (p *Provider) GetMetricsResource(ctx context.Context) ([]*dto.MetricFamily,
|
||||
|
||||
metricFamily, err := registry.Gather()
|
||||
if err != nil {
|
||||
return nil, errors.Join(err, errors.New("error gathering metrics from collector"))
|
||||
p.logger.Error(err, "Error gathering metrics from collector")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return metricFamily, nil
|
||||
}
|
||||
|
||||
// PortForward forwards a local port to a port on the pod
|
||||
func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port int32, stream io.ReadWriteCloser) error {
|
||||
hostPodName := p.Translator.TranslateName(namespace, pod)
|
||||
func (p *Provider) PortForward(ctx context.Context, namespace, name string, port int32, stream io.ReadWriteCloser) error {
|
||||
hostPodName := p.Translator.TranslateName(namespace, name)
|
||||
|
||||
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName, "port", port)
|
||||
logger.V(1).Info("PortForward")
|
||||
|
||||
req := p.CoreClient.RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(hostPodName).
|
||||
@@ -305,6 +341,7 @@ func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port
|
||||
|
||||
transport, upgrader, err := spdy.RoundTripperFor(&p.ClientConfig)
|
||||
if err != nil {
|
||||
logger.Error(err, "Error creating RoundTripper for PortForward")
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -318,10 +355,16 @@ func (p *Provider) PortForward(ctx context.Context, namespace, pod string, port
|
||||
// so more work is needed to detect a close and handle that appropriately.
|
||||
fw, err := portforward.New(dialer, []string{portAsString}, stopChannel, readyChannel, stream, stream)
|
||||
if err != nil {
|
||||
logger.Error(err, "Error creating new PortForward")
|
||||
return err
|
||||
}
|
||||
|
||||
return fw.ForwardPorts()
|
||||
if err := fw.ForwardPorts(); err != nil {
|
||||
logger.Error(err, "Error forwarding ports")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreatePod executes createPod with retry
|
||||
@@ -329,17 +372,9 @@ func (p *Provider) CreatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
return p.withRetry(ctx, p.createPod, pod)
|
||||
}
|
||||
|
||||
// createPod takes a Kubernetes Pod and deploys it within the provider.
|
||||
func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
// fieldPath envs are not being translated correctly using the virtual kubelet pod controller
|
||||
// as a workaround we will try to fetch the pod from the virtual cluster and copy over the envSource
|
||||
var sourcePod corev1.Pod
|
||||
if err := p.VirtualClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &sourcePod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tPod := sourcePod.DeepCopy()
|
||||
p.Translator.TranslateTo(tPod)
|
||||
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name)
|
||||
logger.V(1).Info("CreatePod")
|
||||
|
||||
// get Cluster definition
|
||||
clusterKey := types.NamespacedName{
|
||||
@@ -348,78 +383,101 @@ func (p *Provider) createPod(ctx context.Context, pod *corev1.Pod) error {
|
||||
}
|
||||
|
||||
var cluster v1beta1.Cluster
|
||||
|
||||
if err := p.HostClient.Get(ctx, clusterKey, &cluster); err != nil {
|
||||
return fmt.Errorf("unable to get cluster %s in namespace %s: %w", p.ClusterName, p.ClusterNamespace, err)
|
||||
logger.Error(err, "Error getting Virtual Cluster definition")
|
||||
return err
|
||||
}
|
||||
|
||||
// these values shouldn't be set on create
|
||||
tPod.UID = ""
|
||||
tPod.ResourceVersion = ""
|
||||
// get Pod from Virtual Cluster
|
||||
key := types.NamespacedName{
|
||||
Name: pod.Name,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
var virtualPod corev1.Pod
|
||||
if err := p.VirtualClient.Get(ctx, key, &virtualPod); err != nil {
|
||||
logger.Error(err, "Error getting Pod from Virtual Cluster")
|
||||
return err
|
||||
}
|
||||
|
||||
// Copy the virtual Pod and use it as a baseline for the hostPod
|
||||
// do some basic translation and clearing some values (UID, ResourceVersion, ...)
|
||||
|
||||
hostPod := virtualPod.DeepCopy()
|
||||
p.Translator.TranslateTo(hostPod)
|
||||
|
||||
logger = logger.WithValues("pod", hostPod.Name)
|
||||
|
||||
// the node was scheduled on the virtual kubelet, but leaving it this way will make it pending indefinitely
|
||||
tPod.Spec.NodeName = ""
|
||||
hostPod.Spec.NodeName = ""
|
||||
|
||||
tPod.Spec.NodeSelector = cluster.Spec.NodeSelector
|
||||
hostPod.Spec.NodeSelector = cluster.Spec.NodeSelector
|
||||
|
||||
// setting the hostname for the pod if its not set
|
||||
if pod.Spec.Hostname == "" {
|
||||
tPod.Spec.Hostname = k3kcontroller.SafeConcatName(pod.Name)
|
||||
if virtualPod.Spec.Hostname == "" {
|
||||
hostPod.Spec.Hostname = k3kcontroller.SafeConcatName(virtualPod.Name)
|
||||
}
|
||||
|
||||
// if the priorityClass for the virtual cluster is set then override the provided value
|
||||
// Note: the core-dns and local-path-provisioner pod are scheduled by k3s with the
|
||||
// 'system-cluster-critical' and 'system-node-critical' default priority classes.
|
||||
if !strings.HasPrefix(tPod.Spec.PriorityClassName, "system-") {
|
||||
if tPod.Spec.PriorityClassName != "" {
|
||||
tPriorityClassName := p.Translator.TranslateName("", tPod.Spec.PriorityClassName)
|
||||
tPod.Spec.PriorityClassName = tPriorityClassName
|
||||
if !strings.HasPrefix(hostPod.Spec.PriorityClassName, "system-") {
|
||||
if hostPod.Spec.PriorityClassName != "" {
|
||||
tPriorityClassName := p.Translator.TranslateName("", hostPod.Spec.PriorityClassName)
|
||||
hostPod.Spec.PriorityClassName = tPriorityClassName
|
||||
}
|
||||
|
||||
if cluster.Spec.PriorityClass != "" {
|
||||
tPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
|
||||
tPod.Spec.Priority = nil
|
||||
hostPod.Spec.PriorityClassName = cluster.Spec.PriorityClass
|
||||
hostPod.Spec.Priority = nil
|
||||
}
|
||||
}
|
||||
|
||||
p.configurePodEnvs(hostPod, &virtualPod)
|
||||
|
||||
// fieldpath annotations
|
||||
if err := p.configureFieldPathEnv(&sourcePod, tPod); err != nil {
|
||||
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// volumes will often refer to resources in the virtual cluster, but instead need to refer to the sync'd
|
||||
// host cluster version
|
||||
if err := p.transformVolumes(pod.Namespace, tPod.Spec.Volumes); err != nil {
|
||||
return fmt.Errorf("unable to sync volumes for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// sync serviceaccount token to a the host cluster
|
||||
if err := p.transformTokens(ctx, pod, tPod); err != nil {
|
||||
return fmt.Errorf("unable to transform tokens for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
for i, imagePullSecret := range tPod.Spec.ImagePullSecrets {
|
||||
tPod.Spec.ImagePullSecrets[i].Name = p.Translator.TranslateName(pod.Namespace, imagePullSecret.Name)
|
||||
}
|
||||
|
||||
// inject networking information to the pod including the virtual cluster controlplane endpoint
|
||||
configureNetworking(tPod, pod.Name, pod.Namespace, p.serverIP, p.dnsIP)
|
||||
|
||||
p.logger.Info("creating pod",
|
||||
"host_namespace", tPod.Namespace, "host_name", tPod.Name,
|
||||
"virtual_namespace", pod.Namespace, "virtual_name", pod.Name,
|
||||
)
|
||||
|
||||
// set ownerReference to the cluster object
|
||||
if err := controllerutil.SetControllerReference(&cluster, tPod, p.HostClient.Scheme()); err != nil {
|
||||
if err := p.configureFieldPathEnv(&virtualPod, hostPod); err != nil {
|
||||
logger.Error(err, "Unable to fetch fieldpath annotations for pod")
|
||||
return err
|
||||
}
|
||||
|
||||
return p.HostClient.Create(ctx, tPod)
|
||||
// volumes will often refer to resources in the virtual cluster
|
||||
// but instead need to refer to the synced host cluster version
|
||||
p.transformVolumes(pod.Namespace, hostPod.Spec.Volumes)
|
||||
|
||||
// sync serviceaccount token to a the host cluster
|
||||
if err := p.transformTokens(ctx, &virtualPod, hostPod); err != nil {
|
||||
logger.Error(err, "Unable to transform tokens for pod")
|
||||
return err
|
||||
}
|
||||
|
||||
for i, imagePullSecret := range hostPod.Spec.ImagePullSecrets {
|
||||
hostPod.Spec.ImagePullSecrets[i].Name = p.Translator.TranslateName(virtualPod.Namespace, imagePullSecret.Name)
|
||||
}
|
||||
|
||||
// inject networking information to the pod including the virtual cluster controlplane endpoint
|
||||
configureNetworking(hostPod, virtualPod.Name, virtualPod.Namespace, p.serverIP, p.dnsIP)
|
||||
|
||||
// set ownerReference to the cluster object
|
||||
if err := controllerutil.SetControllerReference(&cluster, hostPod, p.HostClient.Scheme()); err != nil {
|
||||
logger.Error(err, "Unable to set owner reference for pod")
|
||||
return err
|
||||
}
|
||||
|
||||
if err := p.HostClient.Create(ctx, hostPod); err != nil {
|
||||
logger.Error(err, "Error creating pod on host cluster")
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("Pod created on host cluster")
|
||||
|
||||
return nil
|
||||
}

// withRetry retries passed function with interval and timeout
func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev1.Pod) error, pod *corev1.Pod) error {
	const (
		interval = 2 * time.Second
		interval = time.Second
		timeout  = 10 * time.Second
	)

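The hunk above only shows the retry constants; the body of `withRetry` is not part of this diff. As a hedged sketch, a wrapper with this interval and timeout could be built on `wait.PollUntilContextTimeout` from `k8s.io/apimachinery`; the repository's real implementation may differ.

```go
package provider

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
)

// withRetrySketch is an assumed shape of a retry wrapper with a 1s interval
// and 10s timeout; it keeps retrying f until it succeeds or the timeout expires.
func withRetrySketch(ctx context.Context, f func(context.Context, *corev1.Pod) error, pod *corev1.Pod) error {
	const (
		interval = time.Second
		timeout  = 10 * time.Second
	)

	var lastErr error

	err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
		// Retry on any error until the timeout expires.
		if lastErr = f(ctx, pod); lastErr != nil {
			return false, nil
		}

		return true, nil
	})
	if err != nil && lastErr != nil {
		return lastErr
	}

	return err
}
```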
@@ -443,44 +501,49 @@ func (p *Provider) withRetry(ctx context.Context, f func(context.Context, *corev
|
||||
return nil
|
||||
}
|
||||
|
||||
// transformVolumes changes the volumes to the representation in the host cluster. Will return an error
|
||||
// if one/more volumes couldn't be transformed
|
||||
func (p *Provider) transformVolumes(podNamespace string, volumes []corev1.Volume) error {
|
||||
for _, volume := range volumes {
|
||||
// transformVolumes changes the volumes to the representation in the host cluster
|
||||
func (p *Provider) transformVolumes(podNamespace string, volumes []corev1.Volume) {
|
||||
for i := range volumes {
|
||||
volume := &volumes[i]
|
||||
|
||||
// Skip volumes related to Kube API access
|
||||
if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) {
|
||||
continue
|
||||
}
|
||||
// note: this needs to handle downward api volumes as well, but more thought is needed on how to do that
|
||||
if volume.ConfigMap != nil {
|
||||
|
||||
switch {
|
||||
case volume.ConfigMap != nil:
|
||||
volume.ConfigMap.Name = p.Translator.TranslateName(podNamespace, volume.ConfigMap.Name)
|
||||
} else if volume.Secret != nil {
|
||||
|
||||
case volume.Secret != nil:
|
||||
volume.Secret.SecretName = p.Translator.TranslateName(podNamespace, volume.Secret.SecretName)
|
||||
} else if volume.Projected != nil {
|
||||
|
||||
case volume.PersistentVolumeClaim != nil:
|
||||
volume.PersistentVolumeClaim.ClaimName = p.Translator.TranslateName(podNamespace, volume.PersistentVolumeClaim.ClaimName)
|
||||
|
||||
case volume.Projected != nil:
|
||||
for _, source := range volume.Projected.Sources {
|
||||
if source.ConfigMap != nil {
|
||||
switch {
|
||||
case source.ConfigMap != nil:
|
||||
source.ConfigMap.Name = p.Translator.TranslateName(podNamespace, source.ConfigMap.Name)
|
||||
} else if source.Secret != nil {
|
||||
case source.Secret != nil:
|
||||
source.Secret.Name = p.Translator.TranslateName(podNamespace, source.Secret.Name)
|
||||
}
|
||||
}
|
||||
} else if volume.PersistentVolumeClaim != nil {
|
||||
volume.PersistentVolumeClaim.ClaimName = p.Translator.TranslateName(podNamespace, volume.PersistentVolumeClaim.ClaimName)
|
||||
} else if volume.DownwardAPI != nil {
|
||||
|
||||
case volume.DownwardAPI != nil:
|
||||
for _, downwardAPI := range volume.DownwardAPI.Items {
|
||||
if downwardAPI.FieldRef != nil {
|
||||
if downwardAPI.FieldRef.FieldPath == translate.MetadataNameField {
|
||||
switch downwardAPI.FieldRef.FieldPath {
|
||||
case translate.MetadataNameField:
|
||||
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
|
||||
}
|
||||
|
||||
if downwardAPI.FieldRef.FieldPath == translate.MetadataNamespaceField {
|
||||
case translate.MetadataNamespaceField:
|
||||
downwardAPI.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdatePod executes updatePod with retry
|
||||
@@ -489,96 +552,110 @@ func (p *Provider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
}
|
||||
|
||||
func (p *Provider) updatePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
p.logger.V(1).Info("got a request for update pod")
|
||||
|
||||
// Once scheduled a Pod cannot update other fields than the image of the containers, initcontainers and a few others
|
||||
// See: https://kubernetes.io/docs/concepts/workloads/pods/#pod-update-and-replacement
|
||||
hostPodName := p.Translator.TranslateName(pod.Namespace, pod.Name)
|
||||
|
||||
// Update Pod in the virtual cluster
|
||||
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name, "pod", hostPodName)
|
||||
logger.V(1).Info("UpdatePod")
|
||||
|
||||
var currentVirtualPod corev1.Pod
|
||||
if err := p.VirtualClient.Get(ctx, client.ObjectKeyFromObject(pod), ¤tVirtualPod); err != nil {
|
||||
return fmt.Errorf("unable to get pod to update from virtual cluster: %w", err)
|
||||
}
|
||||
//
|
||||
// Host Pod update
|
||||
//
|
||||
|
||||
hostNamespaceName := types.NamespacedName{
|
||||
hostKey := types.NamespacedName{
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.Translator.TranslateName(pod.Namespace, pod.Name),
|
||||
Name: hostPodName,
|
||||
}
|
||||
|
||||
var currentHostPod corev1.Pod
|
||||
|
||||
if err := p.HostClient.Get(ctx, hostNamespaceName, ¤tHostPod); err != nil {
|
||||
return fmt.Errorf("unable to get pod to update from host cluster: %w", err)
|
||||
var hostPod corev1.Pod
|
||||
if err := p.HostClient.Get(ctx, hostKey, &hostPod); err != nil {
|
||||
logger.Error(err, "Unable to get Pod to update from host cluster")
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle ephemeral containers
|
||||
if !cmp.Equal(currentHostPod.Spec.EphemeralContainers, pod.Spec.EphemeralContainers) {
|
||||
p.logger.Info("Updating ephemeral containers")
|
||||
updatePod(&hostPod, pod)
|
||||
|
||||
currentHostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
|
||||
if err := p.HostClient.Update(ctx, &hostPod); err != nil {
|
||||
logger.Error(err, "Unable to update Pod in host cluster")
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, currentHostPod.Name, ¤tHostPod, metav1.UpdateOptions{}); err != nil {
|
||||
p.logger.Error(err, "error when updating ephemeral containers")
|
||||
// Ephemeral containers update (subresource)
|
||||
if !cmp.Equal(hostPod.Spec.EphemeralContainers, pod.Spec.EphemeralContainers) {
|
||||
logger.V(1).Info("Updating ephemeral containers in host pod")
|
||||
|
||||
hostPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
|
||||
|
||||
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, hostPod.Name, &hostPod, metav1.UpdateOptions{}); err != nil {
|
||||
logger.Error(err, "Error when updating ephemeral containers in host pod")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// fieldpath annotations
|
||||
if err := p.configureFieldPathEnv(¤tVirtualPod, ¤tHostPod); err != nil {
|
||||
return fmt.Errorf("unable to fetch fieldpath annotations for pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
logger.Info("Pod updated in host cluster")
|
||||
|
||||
//
|
||||
// Virtual Pod update
|
||||
//
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: pod.Name,
|
||||
Namespace: pod.Namespace,
|
||||
}
|
||||
|
||||
currentVirtualPod.Spec.Containers = updateContainerImages(currentVirtualPod.Spec.Containers, pod.Spec.Containers)
|
||||
currentVirtualPod.Spec.InitContainers = updateContainerImages(currentVirtualPod.Spec.InitContainers, pod.Spec.InitContainers)
|
||||
|
||||
currentVirtualPod.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
|
||||
currentVirtualPod.Spec.Tolerations = pod.Spec.Tolerations
|
||||
|
||||
// in the virtual cluster we can update also the labels and annotations
|
||||
currentVirtualPod.Annotations = pod.Annotations
|
||||
currentVirtualPod.Labels = pod.Labels
|
||||
|
||||
if err := p.VirtualClient.Update(ctx, ¤tVirtualPod); err != nil {
|
||||
return fmt.Errorf("unable to update pod in the virtual cluster: %w", err)
|
||||
var virtualPod corev1.Pod
|
||||
if err := p.VirtualClient.Get(ctx, key, &virtualPod); err != nil {
|
||||
logger.Error(err, "Unable to get pod to update from virtual cluster")
|
||||
return err
|
||||
}
|
||||
|
||||
// Update Pod in the host cluster
|
||||
currentHostPod.Spec.Containers = updateContainerImages(currentHostPod.Spec.Containers, pod.Spec.Containers)
|
||||
currentHostPod.Spec.InitContainers = updateContainerImages(currentHostPod.Spec.InitContainers, pod.Spec.InitContainers)
|
||||
updatePod(&virtualPod, pod)
|
||||
|
||||
// update ActiveDeadlineSeconds and Tolerations
|
||||
currentHostPod.Spec.ActiveDeadlineSeconds = pod.Spec.ActiveDeadlineSeconds
|
||||
currentHostPod.Spec.Tolerations = pod.Spec.Tolerations
|
||||
|
||||
// in the virtual cluster we can update also the labels and annotations
|
||||
maps.Copy(currentHostPod.Annotations, pod.Annotations)
|
||||
maps.Copy(currentHostPod.Labels, pod.Labels)
|
||||
|
||||
if err := p.HostClient.Update(ctx, ¤tHostPod); err != nil {
|
||||
return fmt.Errorf("unable to update pod in the host cluster: %w", err)
|
||||
if err := p.VirtualClient.Update(ctx, &virtualPod); err != nil {
|
||||
logger.Error(err, "Unable to update Pod in virtual cluster")
|
||||
return err
|
||||
}
|
||||
|
||||
// Ephemeral containers update (subresource)
|
||||
if !cmp.Equal(virtualPod.Spec.EphemeralContainers, pod.Spec.EphemeralContainers) {
|
||||
logger.V(1).Info("Updating ephemeral containers in virtual pod")
|
||||
|
||||
virtualPod.Spec.EphemeralContainers = pod.Spec.EphemeralContainers
|
||||
|
||||
if _, err := p.CoreClient.Pods(p.ClusterNamespace).UpdateEphemeralContainers(ctx, virtualPod.Name, &virtualPod, metav1.UpdateOptions{}); err != nil {
|
||||
logger.Error(err, "Error when updating ephemeral containers in virtual pod")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("Pod updated in virtual and host cluster")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func updatePod(dst, src *corev1.Pod) {
|
||||
updateContainerImages(dst.Spec.Containers, src.Spec.Containers)
|
||||
updateContainerImages(dst.Spec.InitContainers, src.Spec.InitContainers)
|
||||
|
||||
dst.Spec.ActiveDeadlineSeconds = src.Spec.ActiveDeadlineSeconds
|
||||
dst.Spec.Tolerations = src.Spec.Tolerations
|
||||
|
||||
dst.Annotations = src.Annotations
|
||||
dst.Labels = src.Labels
|
||||
}
|
||||
|
||||
// updateContainerImages will update the images of the original container images with the same name
|
||||
func updateContainerImages(original, updated []corev1.Container) []corev1.Container {
|
||||
newImages := make(map[string]string)
|
||||
func updateContainerImages(dst, src []corev1.Container) {
|
||||
images := make(map[string]string)
|
||||
|
||||
for _, c := range updated {
|
||||
newImages[c.Name] = c.Image
|
||||
for _, container := range src {
|
||||
images[container.Name] = container.Image
|
||||
}
|
||||
|
||||
for i, c := range original {
|
||||
if updatedImage, found := newImages[c.Name]; found {
|
||||
original[i].Image = updatedImage
|
||||
}
|
||||
for i, container := range dst {
|
||||
dst[i].Image = images[container.Name]
|
||||
}
|
||||
|
||||
return original
|
||||
}
|
||||
|
||||
// DeletePod executes deletePod with retry
|
||||
@@ -590,20 +667,24 @@ func (p *Provider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
// expected to call the NotifyPods callback with a terminal pod status where all the containers are in a terminal
|
||||
// state, as well as the pod. DeletePod may be called multiple times for the same pod.
|
||||
func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
p.logger.Info(fmt.Sprintf("got request to delete pod %s/%s", pod.Namespace, pod.Name))
|
||||
hostName := p.Translator.TranslateName(pod.Namespace, pod.Name)
|
||||
hostPodName := p.Translator.TranslateName(pod.Namespace, pod.Name)
|
||||
|
||||
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostName, metav1.DeleteOptions{})
|
||||
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name, "pod", hostPodName)
|
||||
logger.V(1).Info("DeletePod")
|
||||
|
||||
err := p.CoreClient.Pods(p.ClusterNamespace).Delete(ctx, hostPodName, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
p.logger.Info(fmt.Sprintf("pod %s/%s already deleted from host cluster", p.ClusterNamespace, hostName))
|
||||
logger.Info("Pod to delete not found in host cluster")
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unable to delete pod %s/%s: %w", pod.Namespace, pod.Name, err)
|
||||
logger.Error(err, "Error trying to delete pod from host cluster")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
p.logger.Info(fmt.Sprintf("pod %s/%s deleted from host cluster", p.ClusterNamespace, hostName))
|
||||
logger.Info("Pod deleted from host cluster")
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -613,21 +694,18 @@ func (p *Provider) deletePod(ctx context.Context, pod *corev1.Pod) error {
|
||||
// concurrently outside of the calling goroutine. Therefore it is recommended
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error) {
|
||||
p.logger.V(1).Info("got a request for get pod", "namespace", namespace, "name", name)
|
||||
hostNamespaceName := types.NamespacedName{
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: p.Translator.TranslateName(namespace, name),
|
||||
hostPodName := p.Translator.TranslateName(namespace, name)
|
||||
|
||||
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName)
|
||||
logger.V(1).Info("GetPod")
|
||||
|
||||
pod, err := p.getPodFromHostCluster(ctx, hostPodName)
|
||||
if err != nil {
|
||||
logger.Error(err, "Error getting pod from host cluster for GetPod")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pod corev1.Pod
|
||||
|
||||
if err := p.HostClient.Get(ctx, hostNamespaceName, &pod); err != nil {
|
||||
return nil, fmt.Errorf("error when retrieving pod: %w", err)
|
||||
}
|
||||
|
||||
p.Translator.TranslateFrom(&pod)
|
||||
|
||||
return &pod, nil
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// GetPodStatus retrieves the status of a pod by name from the provider.
|
||||
@@ -635,28 +713,49 @@ func (p *Provider) GetPod(ctx context.Context, namespace, name string) (*corev1.
|
||||
// concurrently outside of the calling goroutine. Therefore it is recommended
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
|
||||
p.logger.V(1).Info("got a request for pod status", "namespace", namespace, "name", name)
|
||||
hostPodName := p.Translator.TranslateName(namespace, name)
|
||||
|
||||
pod, err := p.GetPod(ctx, namespace, name)
|
||||
logger := p.logger.WithValues("namespace", namespace, "name", name, "pod", hostPodName)
|
||||
logger.V(1).Info("GetPodStatus")
|
||||
|
||||
pod, err := p.getPodFromHostCluster(ctx, hostPodName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get pod for status: %w", err)
|
||||
logger.Error(err, "Error getting pod from host cluster for PodStatus")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p.logger.V(1).Info("got pod status", "namespace", namespace, "name", name, "status", pod.Status)
|
||||
|
||||
return pod.Status.DeepCopy(), nil
|
||||
}
|
||||
|
||||
func (p *Provider) getPodFromHostCluster(ctx context.Context, hostPodName string) (*corev1.Pod, error) {
|
||||
key := types.NamespacedName{
|
||||
Namespace: p.ClusterNamespace,
|
||||
Name: hostPodName,
|
||||
}
|
||||
|
||||
var pod corev1.Pod
|
||||
if err := p.HostClient.Get(ctx, key, &pod); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p.Translator.TranslateFrom(&pod)
|
||||
|
||||
return &pod, nil
|
||||
}
|
||||
|
||||
// GetPods retrieves a list of all pods running on the provider (can be cached).
|
||||
// The Pods returned are expected to be immutable, and may be accessed
|
||||
// concurrently outside of the calling goroutine. Therefore it is recommended
|
||||
// to return a version after DeepCopy.
|
||||
func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
|
||||
p.logger.V(1).Info("GetPods")
|
||||
|
||||
selector := labels.NewSelector()
|
||||
|
||||
requirement, err := labels.NewRequirement(translate.ClusterNameLabel, selection.Equals, []string{p.ClusterName})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to create label selector: %w", err)
|
||||
p.logger.Error(err, "Error creating label selector for GetPods")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
selector = selector.Add(*requirement)
|
||||
@@ -665,7 +764,8 @@ func (p *Provider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
|
||||
|
||||
err = p.HostClient.List(ctx, &podList, &client.ListOptions{LabelSelector: selector})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to list pods: %w", err)
|
||||
p.logger.Error(err, "Error listing pods from host cluster")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
retPods := []*corev1.Pod{}
|
||||
@@ -766,28 +866,91 @@ func mergeEnvVars(orig, updated []corev1.EnvVar) []corev1.EnvVar {
|
||||
return orig
|
||||
}
|
||||
|
||||
func (p *Provider) configurePodEnvs(hostPod, virtualPod *corev1.Pod) {
|
||||
for i := range hostPod.Spec.Containers {
|
||||
hostPod.Spec.Containers[i].Env = p.configureEnv(virtualPod, virtualPod.Spec.Containers[i].Env)
|
||||
hostPod.Spec.Containers[i].EnvFrom = p.configureEnvFrom(virtualPod, virtualPod.Spec.Containers[i].EnvFrom)
|
||||
}
|
||||
|
||||
for i := range hostPod.Spec.InitContainers {
|
||||
hostPod.Spec.InitContainers[i].Env = p.configureEnv(virtualPod, virtualPod.Spec.InitContainers[i].Env)
|
||||
hostPod.Spec.InitContainers[i].EnvFrom = p.configureEnvFrom(virtualPod, virtualPod.Spec.InitContainers[i].EnvFrom)
|
||||
}
|
||||
|
||||
for i := range hostPod.Spec.EphemeralContainers {
|
||||
hostPod.Spec.EphemeralContainers[i].Env = p.configureEnv(virtualPod, virtualPod.Spec.EphemeralContainers[i].Env)
|
||||
hostPod.Spec.EphemeralContainers[i].EnvFrom = p.configureEnvFrom(virtualPod, virtualPod.Spec.EphemeralContainers[i].EnvFrom)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Provider) configureEnv(virtualPod *corev1.Pod, envs []corev1.EnvVar) []corev1.EnvVar {
|
||||
resultingEnvVars := make([]corev1.EnvVar, 0, len(envs))
|
||||
|
||||
for _, envVar := range envs {
|
||||
resultingEnvVar := envVar
|
||||
|
||||
if envVar.ValueFrom != nil {
|
||||
from := envVar.ValueFrom
|
||||
|
||||
switch {
|
||||
case from.FieldRef != nil:
|
||||
fieldRef := from.FieldRef
|
||||
|
||||
// for name and namespace we need to hardcode the virtual cluster values, and clear the FieldRef
|
||||
switch fieldRef.FieldPath {
|
||||
case "metadata.name":
|
||||
resultingEnvVar.Value = virtualPod.Name
|
||||
resultingEnvVar.ValueFrom = nil
|
||||
case "metadata.namespace":
|
||||
resultingEnvVar.Value = virtualPod.Namespace
|
||||
resultingEnvVar.ValueFrom = nil
|
||||
}
|
||||
|
||||
case from.ConfigMapKeyRef != nil:
|
||||
resultingEnvVar.ValueFrom.ConfigMapKeyRef.Name = p.Translator.TranslateName(virtualPod.Namespace, resultingEnvVar.ValueFrom.ConfigMapKeyRef.Name)
|
||||
|
||||
case from.SecretKeyRef != nil:
|
||||
resultingEnvVar.ValueFrom.SecretKeyRef.Name = p.Translator.TranslateName(virtualPod.Namespace, resultingEnvVar.ValueFrom.SecretKeyRef.Name)
|
||||
}
|
||||
}
|
||||
|
||||
resultingEnvVars = append(resultingEnvVars, resultingEnvVar)
|
||||
}
|
||||
|
||||
return resultingEnvVars
|
||||
}
|
||||
|
||||
func (p *Provider) configureEnvFrom(virtualPod *corev1.Pod, envs []corev1.EnvFromSource) []corev1.EnvFromSource {
|
||||
resultingEnvVars := make([]corev1.EnvFromSource, 0, len(envs))
|
||||
|
||||
for _, envVar := range envs {
|
||||
resultingEnvVar := envVar
|
||||
|
||||
if envVar.ConfigMapRef != nil {
|
||||
resultingEnvVar.ConfigMapRef.Name = p.Translator.TranslateName(virtualPod.Namespace, envVar.ConfigMapRef.Name)
|
||||
}
|
||||
|
||||
if envVar.SecretRef != nil {
|
||||
resultingEnvVar.SecretRef.Name = p.Translator.TranslateName(virtualPod.Namespace, envVar.SecretRef.Name)
|
||||
}
|
||||
|
||||
resultingEnvVars = append(resultingEnvVars, resultingEnvVar)
|
||||
}
|
||||
|
||||
return resultingEnvVars
|
||||
}
|
||||
|
||||
// configureFieldPathEnv retrieves all annotations created by the pod mutating webhook
// to assign env fieldPaths to pods. It also makes sure to point metadata.name and metadata.namespace
// to the assigned annotations.
|
||||
func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
|
||||
for _, container := range pod.Spec.EphemeralContainers {
|
||||
addFieldPathAnnotationToEnv(container.Env)
|
||||
}
|
||||
// override metadata.name and metadata.namespace with pod annotations
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
addFieldPathAnnotationToEnv(container.Env)
|
||||
}
|
||||
|
||||
for _, container := range pod.Spec.Containers {
|
||||
addFieldPathAnnotationToEnv(container.Env)
|
||||
}
|
||||
|
||||
for name, value := range pod.Annotations {
|
||||
if strings.Contains(name, webhook.FieldpathField) {
|
||||
containerIndex, envName, err := webhook.ParseFieldPathAnnotationKey(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// re-adding these envs to the pod
|
||||
tPod.Spec.Containers[containerIndex].Env = append(tPod.Spec.Containers[containerIndex].Env, corev1.EnvVar{
|
||||
Name: envName,
|
||||
@@ -797,6 +960,7 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// removing the annotation from the pod
|
||||
delete(tPod.Annotations, name)
|
||||
}
|
||||
@@ -804,22 +968,3 @@ func (p *Provider) configureFieldPathEnv(pod, tPod *corev1.Pod) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addFieldPathAnnotationToEnv(envVars []corev1.EnvVar) {
|
||||
for j, envVar := range envVars {
|
||||
if envVar.ValueFrom == nil || envVar.ValueFrom.FieldRef == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldPath := envVar.ValueFrom.FieldRef.FieldPath
|
||||
if fieldPath == translate.MetadataNameField {
|
||||
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNameAnnotation)
|
||||
envVars[j] = envVar
|
||||
}
|
||||
|
||||
if fieldPath == translate.MetadataNamespaceField {
|
||||
envVar.ValueFrom.FieldRef.FieldPath = fmt.Sprintf("metadata.annotations['%s']", translate.ResourceNamespaceAnnotation)
|
||||
envVars[j] = envVar
|
||||
}
|
||||
}
|
||||
}
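For illustration, a hedged sketch (same package assumed) of what the rewrite above produces, using the constants from the translate package referenced in this file:

env := []corev1.EnvVar{{
	Name:      "POD_NAME",
	ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: translate.MetadataNameField}},
}}
addFieldPathAnnotationToEnv(env)
// env[0].ValueFrom.FieldRef.FieldPath now reads
// "metadata.annotations['" + translate.ResourceNameAnnotation + "']",
// so the host kubelet resolves the virtual pod's name from the annotation stamped
// on the host pod rather than from the host pod's own metadata.name.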
|
||||
|
||||
@@ -4,7 +4,12 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
)
|
||||
|
||||
func Test_mergeEnvVars(t *testing.T) {
|
||||
@@ -68,3 +73,230 @@ func Test_mergeEnvVars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_configureEnv(t *testing.T) {
|
||||
virtualPod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "my-pod",
|
||||
Namespace: "my-namespace",
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
virtualPod *corev1.Pod
|
||||
envs []corev1.EnvVar
|
||||
want []corev1.EnvVar
|
||||
}{
|
||||
{
|
||||
name: "empty envs",
|
||||
virtualPod: virtualPod,
|
||||
envs: []corev1.EnvVar{},
|
||||
want: []corev1.EnvVar{},
|
||||
},
|
||||
{
|
||||
name: "simple env var",
|
||||
virtualPod: virtualPod,
|
||||
envs: []corev1.EnvVar{
|
||||
{Name: "MY_VAR", Value: "my-value"},
|
||||
},
|
||||
want: []corev1.EnvVar{
|
||||
{Name: "MY_VAR", Value: "my-value"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata.name field ref",
|
||||
virtualPod: virtualPod,
|
||||
envs: []corev1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []corev1.EnvVar{
|
||||
{Name: "POD_NAME", Value: "my-pod"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata.namespace field ref",
|
||||
virtualPod: virtualPod,
|
||||
envs: []corev1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAMESPACE",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.namespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []corev1.EnvVar{
|
||||
{Name: "POD_NAMESPACE", Value: "my-namespace"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "other field ref",
|
||||
virtualPod: virtualPod,
|
||||
envs: []corev1.EnvVar{
|
||||
{
|
||||
Name: "NODE_NAME",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "spec.nodeName",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []corev1.EnvVar{
|
||||
{
|
||||
Name: "NODE_NAME",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "spec.nodeName",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "secret key ref",
|
||||
virtualPod: virtualPod,
|
||||
envs: []corev1.EnvVar{
|
||||
{
|
||||
Name: "SECRET_VAR",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
SecretKeyRef: &corev1.SecretKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret"},
|
||||
Key: "my-key",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []corev1.EnvVar{
|
||||
{
|
||||
Name: "SECRET_VAR",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
SecretKeyRef: &corev1.SecretKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret-my-namespace-c-test-6d792d7365637265742b6d792d6-887db"},
|
||||
Key: "my-key",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "configmap key ref",
|
||||
virtualPod: virtualPod,
|
||||
envs: []corev1.EnvVar{
|
||||
{
|
||||
Name: "CONFIG_VAR",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{Name: "my-configmap"},
|
||||
Key: "my-key",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []corev1.EnvVar{
|
||||
{
|
||||
Name: "CONFIG_VAR",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{Name: "my-configmap-my-namespace-c-test-6d792d636f6e6669676d6170-301f6"},
|
||||
Key: "my-key",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "resource field ref",
|
||||
virtualPod: virtualPod,
|
||||
envs: []corev1.EnvVar{
|
||||
{
|
||||
Name: "CPU_LIMIT",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
ResourceFieldRef: &corev1.ResourceFieldSelector{
|
||||
ContainerName: "my-container",
|
||||
Resource: "limits.cpu",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []corev1.EnvVar{
|
||||
{
|
||||
Name: "CPU_LIMIT",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
ResourceFieldRef: &corev1.ResourceFieldSelector{
|
||||
ContainerName: "my-container",
|
||||
Resource: "limits.cpu",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed env vars",
|
||||
virtualPod: virtualPod,
|
||||
envs: []corev1.EnvVar{
|
||||
{Name: "MY_VAR", Value: "my-value"},
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "POD_NAMESPACE",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.namespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "NODE_NAME",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "spec.nodeName",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []corev1.EnvVar{
|
||||
{Name: "MY_VAR", Value: "my-value"},
|
||||
{Name: "POD_NAME", Value: "my-pod"},
|
||||
{Name: "POD_NAMESPACE", Value: "my-namespace"},
|
||||
{
|
||||
Name: "NODE_NAME",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "spec.nodeName",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
p := Provider{
|
||||
Translator: translate.ToHostTranslator{
|
||||
ClusterName: "c-test",
|
||||
ClusterNamespace: "ns-test",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := p.configureEnv(tt.virtualPod, tt.envs)
|
||||
assert.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,7 +23,8 @@ const (
|
||||
// transformTokens copies the serviceaccount tokens used by the pod's serviceaccount to a secret on the host cluster and mounts it
// to look like the serviceaccount token
|
||||
func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error {
|
||||
p.logger.Info("transforming token", "pod", pod.Name, "namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName)
|
||||
logger := p.logger.WithValues("namespace", pod.Namespace, "name", pod.Name, "serviceAccountNameod", pod.Spec.ServiceAccountName)
|
||||
logger.V(1).Info("Transforming token")
|
||||
|
||||
// skip this process if the kube-api-access is already removed from the pod
// this is needed in case users already add their own custom tokens, like in Rancher imported clusters
|
||||
|
||||
@@ -381,6 +381,11 @@ func (s *SharedAgent) role(ctx context.Context) error {
|
||||
Resources: []string{"persistentvolumeclaims", "pods", "pods/log", "pods/attach", "pods/exec", "pods/ephemeralcontainers", "secrets", "configmaps", "services"},
|
||||
Verbs: []string{"*"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"events"},
|
||||
Verbs: []string{"create"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{"networking.k8s.io"},
|
||||
Resources: []string{"ingresses"},
|
||||
|
||||
@@ -23,7 +23,7 @@ func newVirtualClient(ctx context.Context, hostClient ctrlruntimeclient.Client,
|
||||
}
|
||||
|
||||
if err := hostClient.Get(ctx, kubeconfigSecretName, &clusterKubeConfig); err != nil {
|
||||
return nil, fmt.Errorf("failed to get kubeconfig secret: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
restConfig, err := clientcmd.RESTConfigFromKubeConfig(clusterKubeConfig.Data["kubeconfig.yaml"])
|
||||
|
||||
@@ -269,8 +269,8 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
|
||||
|
||||
// if the Version is not specified we will try to use the same Kubernetes version of the host.
|
||||
// This version is stored in the Status object, and it will not be updated if already set.
|
||||
if cluster.Spec.Version == "" && cluster.Status.HostVersion == "" {
|
||||
log.V(1).Info("Cluster version not set. Using host version.")
|
||||
if cluster.Status.HostVersion == "" {
|
||||
log.V(1).Info("Cluster host version not set.")
|
||||
|
||||
hostVersion, err := c.DiscoveryClient.ServerVersion()
|
||||
if err != nil {
|
||||
@@ -278,8 +278,9 @@ func (c *ClusterReconciler) reconcile(ctx context.Context, cluster *v1beta1.Clus
|
||||
}
|
||||
|
||||
// update Status HostVersion
|
||||
k8sVersion := strings.Split(hostVersion.GitVersion, "+")[0]
|
||||
cluster.Status.HostVersion = k8sVersion + "-k3s1"
|
||||
k8sVersion, _, _ := strings.Cut(hostVersion.GitVersion, "+")
|
||||
k8sVersion, _, _ = strings.Cut(k8sVersion, "-")
|
||||
cluster.Status.HostVersion = k8sVersion
|
||||
}
|
||||
|
||||
token, err := c.token(ctx, cluster)
|
||||
|
||||
@@ -2,7 +2,6 @@ package cluster_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/utils/ptr"
|
||||
@@ -73,7 +72,6 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
|
||||
|
||||
serverVersion, err := k8s.ServerVersion()
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
expectedHostVersion := fmt.Sprintf("%s-k3s1", serverVersion.GitVersion)
|
||||
|
||||
Eventually(func() string {
|
||||
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)
|
||||
@@ -82,7 +80,7 @@ var _ = Describe("Cluster Controller", Label("controller"), Label("Cluster"), fu
|
||||
}).
|
||||
WithTimeout(time.Second * 30).
|
||||
WithPolling(time.Second).
|
||||
Should(Equal(expectedHostVersion))
|
||||
Should(Equal(serverVersion.GitVersion))
|
||||
|
||||
// check NetworkPolicy
|
||||
expectedNetworkPolicy := &networkingv1.NetworkPolicy{
|
||||
|
||||
@@ -416,9 +416,11 @@ func (s *Server) setupDynamicPersistence() v1.PersistentVolumeClaim {
|
||||
func (s *Server) setupStartCommand() (string, error) {
|
||||
var output bytes.Buffer
|
||||
|
||||
tmpl := singleServerTemplate
|
||||
tmpl := StartupCommand
|
||||
|
||||
mode := "single"
|
||||
if *s.cluster.Spec.Servers > 1 {
|
||||
tmpl = HAServerTemplate
|
||||
mode = "ha"
|
||||
}
|
||||
|
||||
tmplCmd, err := template.New("").Parse(tmpl)
|
||||
@@ -430,6 +432,8 @@ func (s *Server) setupStartCommand() (string, error) {
|
||||
"ETCD_DIR": "/var/lib/rancher/k3s/server/db/etcd",
|
||||
"INIT_CONFIG": "/opt/rancher/k3s/init/config.yaml",
|
||||
"SERVER_CONFIG": "/opt/rancher/k3s/server/config.yaml",
|
||||
"CLUSTER_MODE": mode,
|
||||
"K3K_MODE": string(s.cluster.Spec.Mode),
|
||||
"EXTRA_ARGS": strings.Join(s.cluster.Spec.ServerArgs, " "),
|
||||
}); err != nil {
|
||||
return "", err
|
||||
|
||||
@@ -1,16 +1,102 @@
|
||||
package server
|
||||
|
||||
var singleServerTemplate string = `
|
||||
if [ -d "{{.ETCD_DIR}}" ]; then
|
||||
# if the directory exists then it means it's not an initial run
|
||||
/bin/k3s server --cluster-reset --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
|
||||
fi
|
||||
rm -f /var/lib/rancher/k3s/server/db/reset-flag
|
||||
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log`
|
||||
var StartupCommand string = `
|
||||
info()
|
||||
{
|
||||
echo "[INFO] [$(date +"%c")]" "$@"
|
||||
}
|
||||
|
||||
fatal()
|
||||
{
|
||||
echo "[FATAL] [$(date +"%c")] " "$@" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
# safe mode function to reset node IP after pod restarts
|
||||
safe_mode() {
|
||||
CURRENT_IP=""
|
||||
if [ -f /var/lib/rancher/k3s/k3k-node-ip ]; then
|
||||
CURRENT_IP=$(cat /var/lib/rancher/k3s/k3k-node-ip)
|
||||
fi
|
||||
|
||||
if [ -z "$CURRENT_IP" ] || [ "$CURRENT_IP" = "$POD_IP" ] || [ {{.K3K_MODE}} != "virtual" ]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# skipping if the node is starting for the first time
|
||||
if [ -d "{{.ETCD_DIR}}" ]; then
|
||||
|
||||
info "Starting K3s in Safe Mode (Network Policy Disabled) to patch Node IP from ${CURRENT_IP} to ${POD_IP}"
|
||||
/bin/k3s server --disable-network-policy --config $1 {{.EXTRA_ARGS}} > /dev/null 2>&1 &
|
||||
PID=$!
|
||||
|
||||
# Start the loop to wait for the nodeIP to change
|
||||
info "Waiting for Node IP to update to ${POD_IP}."
|
||||
count=0
|
||||
until kubectl get nodes -o wide 2>/dev/null | grep -q "${POD_IP}"; do
|
||||
if ! kill -0 $PID 2>/dev/null; then
|
||||
fatal "safe Mode K3s process died unexpectedly!"
|
||||
fi
|
||||
sleep 2
|
||||
count=$((count+1))
|
||||
|
||||
if [ $count -gt 60 ]; then
|
||||
fatal "timed out waiting for node to change IP from $CURRENT_IP to $POD_IP"
|
||||
fi
|
||||
done
|
||||
|
||||
info "Node IP is set to ${POD_IP} successfully. Stopping Safe Mode process..."
|
||||
kill $PID
|
||||
wait $PID 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
|
||||
start_single_node() {
|
||||
info "Starting single node setup..."
|
||||
|
||||
# checking for existing data in the single server; if found we must perform a cluster reset
|
||||
if [ -d "{{.ETCD_DIR}}" ]; then
|
||||
info "Existing data found in single node setup. Performing cluster-reset to ensure quorum..."
|
||||
|
||||
if ! /bin/k3s server --cluster-reset --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} > /dev/null 2>&1; then
|
||||
fatal "cluster reset failed!"
|
||||
fi
|
||||
info "Cluster reset complete. Removing Reset flag file."
|
||||
rm -f /var/lib/rancher/k3s/server/db/reset-flag
|
||||
fi
|
||||
|
||||
# entering safe mode to ensure correct NodeIP
|
||||
safe_mode {{.INIT_CONFIG}}
|
||||
|
||||
info "Adding pod IP file."
|
||||
echo $POD_IP > /var/lib/rancher/k3s/k3k-node-ip
|
||||
|
||||
var HAServerTemplate string = `
|
||||
if [ ${POD_NAME: -1} == 0 ] && [ ! -d "{{.ETCD_DIR}}" ]; then
|
||||
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
|
||||
else
|
||||
/bin/k3s server --config {{.SERVER_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
|
||||
fi`
|
||||
}
|
||||
|
||||
start_ha_node() {
|
||||
info "Starting pod $POD_NAME in HA node setup"
|
||||
|
||||
if [ ${POD_NAME: -1} == 0 ] && [ ! -d "{{.ETCD_DIR}}" ]; then
|
||||
info "Adding pod IP file."
|
||||
echo $POD_IP > /var/lib/rancher/k3s/k3k-node-ip
|
||||
|
||||
/bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
|
||||
else
|
||||
safe_mode {{.SERVER_CONFIG}}
|
||||
|
||||
info "Adding pod IP file."
|
||||
echo $POD_IP > /var/lib/rancher/k3s/k3k-node-ip
|
||||
|
||||
/bin/k3s server --config {{.SERVER_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log
|
||||
fi
|
||||
}
|
||||
|
||||
case "{{.CLUSTER_MODE}}" in
|
||||
"ha")
|
||||
start_ha_node
|
||||
;;
|
||||
"single"|*)
|
||||
start_single_node
|
||||
;;
|
||||
esac`
|
||||
|
||||
@@ -120,7 +120,7 @@ func (p *StatefulSetReconciler) handleServerPod(ctx context.Context, cluster v1b
|
||||
|
||||
if pod.DeletionTimestamp.IsZero() {
|
||||
if controllerutil.AddFinalizer(pod, etcdPodFinalizerName) {
|
||||
log.V(1).Info("Server Pod is being deleted. Removing finalizer", "pod", pod.Name, "namespace", pod.Namespace)
|
||||
log.V(1).Info("Server Pod is being created. Adding finalizer", "pod", pod.Name, "namespace", pod.Namespace)
|
||||
|
||||
return p.Client.Update(ctx, pod)
|
||||
}
|
||||
|
||||
@@ -25,21 +25,24 @@ var Backoff = wait.Backoff{
|
||||
Jitter: 0.1,
|
||||
}
|
||||
|
||||
// Image returns the rancher/k3s image tagged with the specified Version.
|
||||
// If Version is empty it will use with the same k8s version of the host cluster,
|
||||
// stored in the Status object. It will return the latest version as last fallback.
|
||||
// K3SImage returns the rancher/k3s image tagged with the found K3SVersion.
|
||||
func K3SImage(cluster *v1beta1.Cluster, k3SImage string) string {
|
||||
image := k3SImage
|
||||
|
||||
imageVersion := "latest"
|
||||
return k3SImage + ":" + K3SVersion(cluster)
|
||||
}
|
||||
|
||||
// K3SVersion returns the k3s version specified in the cluster spec.
// If empty, it returns the k3s version matching the Kubernetes version of the host cluster, stored in the Status object.
// Returns "latest" as a fallback.
|
||||
func K3SVersion(cluster *v1beta1.Cluster) string {
|
||||
if cluster.Spec.Version != "" {
|
||||
imageVersion = cluster.Spec.Version
|
||||
} else if cluster.Status.HostVersion != "" {
|
||||
imageVersion = cluster.Status.HostVersion
|
||||
return cluster.Spec.Version
|
||||
}
|
||||
|
||||
return image + ":" + imageVersion
|
||||
if cluster.Status.HostVersion != "" {
|
||||
return cluster.Status.HostVersion + "-k3s1"
|
||||
}
|
||||
|
||||
return "latest"
|
||||
}
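A short sketch of the fallback order implemented above, with hypothetical version values and assuming the v1beta1 package imported by this file:

c := &v1beta1.Cluster{}
c.Status.HostVersion = "v1.31.0"   // host Kubernetes version, stripped of any "+"/"-" suffix by the controller
_ = K3SVersion(c)                  // "v1.31.0-k3s1"
_ = K3SImage(c, "rancher/k3s")     // "rancher/k3s:v1.31.0-k3s1"

c.Spec.Version = "v1.33.6-k3s1"
_ = K3SImage(c, "rancher/k3s")     // "rancher/k3s:v1.33.6-k3s1" (the spec version wins)

_ = K3SVersion(&v1beta1.Cluster{}) // "latest" when neither spec nor status is set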
|
||||
|
||||
// SafeConcatNameWithPrefix runs the SafeConcatName with extra prefix.
|
||||
|
||||
@@ -51,7 +51,7 @@ func Test_K3S_Image(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedData: "rancher/k3s:v4.5.6",
|
||||
expectedData: "rancher/k3s:v4.5.6-k3s1",
|
||||
},
|
||||
{
|
||||
name: "cluster with empty version spec and empty hostVersion status",
|
||||
|
||||
42
scripts/generate-cli-docs
Executable file
@@ -0,0 +1,42 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eou pipefail
|
||||
|
||||
DOCS_DIR=./docs/cli
|
||||
|
||||
# Generate the raw markdown files
|
||||
go run $DOCS_DIR/genclidoc.go
|
||||
|
||||
echo "Converting Markdown documentation to asciidoc"
|
||||
|
||||
pandoc --from markdown --to asciidoc --lua-filter=$DOCS_DIR/convert.lua $DOCS_DIR/k3kcli.md > $DOCS_DIR/k3kcli.adoc
|
||||
echo "" >> $DOCS_DIR/k3kcli.adoc
|
||||
|
||||
# We use an explicit list of files to keep a stable ordering
|
||||
SUBCOMMAND_FILES=(
|
||||
"$DOCS_DIR/k3kcli_cluster.md"
|
||||
"$DOCS_DIR/k3kcli_cluster_create.md"
|
||||
"$DOCS_DIR/k3kcli_cluster_delete.md"
|
||||
"$DOCS_DIR/k3kcli_cluster_list.md"
|
||||
"$DOCS_DIR/k3kcli_kubeconfig.md"
|
||||
"$DOCS_DIR/k3kcli_kubeconfig_generate.md"
|
||||
"$DOCS_DIR/k3kcli_policy.md"
|
||||
"$DOCS_DIR/k3kcli_policy_create.md"
|
||||
"$DOCS_DIR/k3kcli_policy_delete.md"
|
||||
"$DOCS_DIR/k3kcli_policy_list.md"
|
||||
)
|
||||
|
||||
# Check for newly added doc files
|
||||
EXPECTED_COUNT=${#SUBCOMMAND_FILES[@]}
|
||||
ACTUAL_COUNT=$(ls -1 $DOCS_DIR/k3kcli_*.md | wc -l)
|
||||
|
||||
if [ "$EXPECTED_COUNT" -ne "$ACTUAL_COUNT" ]; then
|
||||
echo "ERROR: File count mismatch!"
|
||||
echo "Expected $EXPECTED_COUNT files in the array, but found $ACTUAL_COUNT on disk."
|
||||
echo "Please update the SUBCOMMAND_FILES array in $0"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
pandoc --from markdown --to asciidoc --lua-filter=$DOCS_DIR/convert.lua "${SUBCOMMAND_FILES[@]}">> $DOCS_DIR/k3kcli.adoc
|
||||
|
||||
echo "Asciidoc documentation generated at $DOCS_DIR/k3kcli.adoc"
|
||||
@@ -6,20 +6,29 @@ import (
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/rand"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/rancher/k3k/pkg/controller/policy"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func K3kcli(args ...string) (string, string, error) {
|
||||
return runCmd("k3kcli", args...)
|
||||
}
|
||||
|
||||
func Kubectl(args ...string) (string, string, error) {
|
||||
return runCmd("kubectl", args...)
|
||||
}
|
||||
|
||||
func runCmd(cmdName string, args ...string) (string, string, error) {
|
||||
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
|
||||
|
||||
cmd := exec.CommandContext(context.Background(), "k3kcli", args...)
|
||||
cmd := exec.CommandContext(context.Background(), cmdName, args...)
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
|
||||
@@ -47,12 +56,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
|
||||
clusterNamespace := "k3k-" + clusterName
|
||||
|
||||
DeferCleanup(func() {
|
||||
err := k8sClient.Delete(context.Background(), &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterNamespace,
|
||||
},
|
||||
})
|
||||
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "create", clusterName)
|
||||
@@ -78,6 +82,24 @@ var _ = When("using the k3kcli", Label("cli"), func() {
|
||||
WithPolling(time.Second).
|
||||
Should(BeEmpty())
|
||||
})
|
||||
|
||||
It("can create a cluster with the specified kubernetes version", func() {
|
||||
var (
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
clusterName := "cluster-" + rand.String(5)
|
||||
clusterNamespace := "k3k-" + clusterName
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "create", "--version", "v1.33.6-k3s1", clusterName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring("You can start using the cluster"))
|
||||
})
|
||||
})
|
||||
|
||||
When("trying the policy commands", func() {
|
||||
@@ -106,8 +128,76 @@ var _ = When("using the k3kcli", Label("cli"), func() {
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "list")
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stdout).To(BeEmpty())
|
||||
Expect(stderr).To(BeEmpty())
|
||||
Expect(stdout).To(Not(ContainSubstring(policyName)))
|
||||
})
|
||||
|
||||
It("can bound a policy to a namespace", func() {
|
||||
var (
|
||||
stdout string
|
||||
stderr string
|
||||
err error
|
||||
)
|
||||
|
||||
namespaceName := "ns-" + rand.String(5)
|
||||
|
||||
_, _, err = Kubectl("create", "namespace", namespaceName)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(namespaceName)
|
||||
})
|
||||
|
||||
By("Creating a policy and binding to a namespace")
|
||||
|
||||
policy1Name := "policy-" + rand.String(5)
|
||||
|
||||
_, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, policy1Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy1Name))
|
||||
|
||||
DeferCleanup(func() {
|
||||
stdout, stderr, err = K3kcli("policy", "delete", policy1Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stdout).To(BeEmpty())
|
||||
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policy1Name))
|
||||
})
|
||||
|
||||
var ns v1.Namespace
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(ns.Name).To(Equal(namespaceName))
|
||||
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy1Name))
|
||||
|
||||
By("Creating another policy and binding to the same namespace without the --overwrite flag")
|
||||
|
||||
policy2Name := "policy-" + rand.String(5)
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, policy2Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy2Name))
|
||||
|
||||
DeferCleanup(func() {
|
||||
stdout, stderr, err = K3kcli("policy", "delete", policy2Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stdout).To(BeEmpty())
|
||||
Expect(stderr).To(ContainSubstring(`Policy '%s' deleted`, policy2Name))
|
||||
})
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(ns.Name).To(Equal(namespaceName))
|
||||
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy1Name))
|
||||
|
||||
By("Forcing the other policy binding with the overwrite flag")
|
||||
|
||||
stdout, stderr, err = K3kcli("policy", "create", "--namespace", namespaceName, "--overwrite", policy2Name)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(stderr).To(ContainSubstring(`Creating policy '%s'`, policy2Name))
|
||||
|
||||
err = k8sClient.Get(context.Background(), types.NamespacedName{Name: namespaceName}, &ns)
|
||||
Expect(err).To(Not(HaveOccurred()), string(stderr))
|
||||
Expect(ns.Name).To(Equal(namespaceName))
|
||||
Expect(ns.Labels).To(HaveKeyWithValue(policy.PolicyNameLabelKey, policy2Name))
|
||||
})
|
||||
})
|
||||
|
||||
@@ -122,12 +212,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
|
||||
clusterNamespace := "k3k-" + clusterName
|
||||
|
||||
DeferCleanup(func() {
|
||||
err := k8sClient.Delete(context.Background(), &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterNamespace,
|
||||
},
|
||||
})
|
||||
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
|
||||
DeleteNamespaces(clusterNamespace)
|
||||
})
|
||||
|
||||
_, stderr, err = K3kcli("cluster", "create", clusterName)
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label("e2e"), Label(certificatesTestsLabel), func() {
|
||||
var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label(e2eTestLabel), Label(certificatesTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("two virtual clusters are installed", Label("e2e"), Label(networkingTestsLabel), func() {
|
||||
var _ = When("two virtual clusters are installed", Label(e2eTestLabel), Label(networkingTestsLabel), func() {
|
||||
var (
|
||||
cluster1 *VirtualCluster
|
||||
cluster2 *VirtualCluster
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("an ephemeral cluster is installed", Label("e2e"), Label(persistenceTestsLabel), func() {
|
||||
var _ = When("an ephemeral cluster is installed", Label(e2eTestLabel), Label(persistenceTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
@@ -110,7 +110,7 @@ var _ = When("an ephemeral cluster is installed", Label("e2e"), Label(persistenc
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a dynamic cluster is installed", Label("e2e"), Label(persistenceTestsLabel), func() {
|
||||
var _ = When("a dynamic cluster is installed", Label(e2eTestLabel), Label(persistenceTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeEach(func() {
|
||||
|
||||
281
tests/cluster_pod_test.go
Normal file
@@ -0,0 +1,281 @@
|
||||
package k3k_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/rancher/k3k/k3k-kubelet/translate"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Context("In a shared cluster", Label(e2eTestLabel), Ordered, func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
|
||||
BeforeAll(func() {
|
||||
virtualCluster = NewVirtualCluster()
|
||||
|
||||
DeferCleanup(func() {
|
||||
DeleteNamespaces(virtualCluster.Cluster.Namespace)
|
||||
})
|
||||
})
|
||||
|
||||
When("creating a Pod with an invalid configuration", func() {
|
||||
var virtualPod *v1.Pod
|
||||
|
||||
BeforeEach(func() {
|
||||
p := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "nginx-",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"name": "var-expansion-test",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"notmysubpath": "mypath",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: "nginx",
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "ANNOTATION",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.annotations['mysubpath']",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "workdir",
|
||||
MountPath: "/volume_mount",
|
||||
},
|
||||
{
|
||||
Name: "workdir",
|
||||
MountPath: "/subpath_mount",
|
||||
SubPathExpr: "$(ANNOTATION)/$(POD_NAME)",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []v1.Volume{{
|
||||
Name: "workdir",
|
||||
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
var err error
|
||||
|
||||
virtualPod, err = virtualCluster.Client.CoreV1().Pods(p.Namespace).Create(ctx, p, metav1.CreateOptions{})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
})
|
||||
|
||||
It("should be in Pending status with the CreateContainerConfigError until we fix the annotation", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
By("Checking the container status of the Pod in the Virtual Cluster")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
pod, err := virtualCluster.Client.CoreV1().Pods(virtualPod.Namespace).Get(ctx, virtualPod.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
g.Expect(pod.Status.Phase).To(Equal(v1.PodPending))
|
||||
|
||||
envVars := pod.Spec.Containers[0].Env
|
||||
g.Expect(envVars).NotTo(BeEmpty())
|
||||
|
||||
var found bool
|
||||
for _, envVar := range envVars {
|
||||
if envVar.Name == "POD_NAME" {
|
||||
found = true
|
||||
|
||||
g.Expect(envVar.ValueFrom).NotTo(BeNil())
g.Expect(envVar.ValueFrom.FieldRef).NotTo(BeNil())
g.Expect(envVar.ValueFrom.FieldRef.FieldPath).To(Equal("metadata.name"))
|
||||
break
|
||||
}
|
||||
}
|
||||
g.Expect(found).To(BeTrue())
|
||||
|
||||
containerStatuses := pod.Status.ContainerStatuses
|
||||
g.Expect(containerStatuses).To(HaveLen(1))
|
||||
|
||||
waitingState := containerStatuses[0].State.Waiting
|
||||
g.Expect(waitingState).NotTo(BeNil())
|
||||
g.Expect(waitingState.Reason).To(Equal("CreateContainerConfigError"))
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(time.Minute).
|
||||
Should(Succeed())
|
||||
|
||||
By("Checking the container status of the Pod in the Host Cluster")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
translator := translate.NewHostTranslator(virtualCluster.Cluster)
|
||||
hostPodName := translator.NamespacedName(virtualPod)
|
||||
|
||||
pod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
g.Expect(pod.Status.Phase).To(Equal(v1.PodPending))
|
||||
|
||||
envVars := pod.Spec.Containers[0].Env
|
||||
g.Expect(envVars).NotTo(BeEmpty())
|
||||
|
||||
var found bool
|
||||
for _, envVar := range envVars {
|
||||
if envVar.Name == "POD_NAME" {
|
||||
found = true
|
||||
|
||||
g.Expect(envVar.ValueFrom).To(BeNil())
|
||||
g.Expect(envVar.Value).To(Equal(virtualPod.Name))
|
||||
break
|
||||
}
|
||||
}
|
||||
g.Expect(found).To(BeTrue())
|
||||
|
||||
containerStatuses := pod.Status.ContainerStatuses
|
||||
g.Expect(containerStatuses).To(HaveLen(1))
|
||||
|
||||
waitingState := containerStatuses[0].State.Waiting
|
||||
g.Expect(waitingState).NotTo(BeNil())
|
||||
g.Expect(waitingState.Reason).To(Equal("CreateContainerConfigError"))
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(time.Minute).
|
||||
Should(Succeed())
|
||||
|
||||
By("Fixing the annotation")
|
||||
|
||||
var err error
|
||||
|
||||
virtualPod, err = virtualCluster.Client.CoreV1().Pods(virtualPod.Namespace).Get(ctx, virtualPod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
virtualPod.Annotations["mysubpath"] = virtualPod.Annotations["notmysubpath"]
|
||||
delete(virtualPod.Annotations, "notmysubpath")
|
||||
|
||||
virtualPod, err = virtualCluster.Client.CoreV1().Pods(virtualPod.Namespace).Update(ctx, virtualPod, metav1.UpdateOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Checking the status of the Pod in the Virtual Cluster")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
vPod, err := virtualCluster.Client.CoreV1().Pods(virtualPod.Namespace).Get(ctx, virtualPod.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
_, cond := pod.GetPodCondition(&vPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(time.Minute).
|
||||
Should(Succeed())
|
||||
|
||||
By("Checking the status of the Pod in the Host Cluster")
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
translator := translate.NewHostTranslator(virtualCluster.Cluster)
|
||||
hostPodName := translator.NamespacedName(virtualPod)
|
||||
|
||||
hPod, err := k8s.CoreV1().Pods(hostPodName.Namespace).Get(ctx, hostPodName.Name, metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
_, cond := pod.GetPodCondition(&hPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(time.Minute).
|
||||
Should(Succeed())
|
||||
})
|
||||
})
|
||||
|
||||
When("installing the nginx-ingress controller", func() {
|
||||
BeforeAll(func() {
|
||||
By("installing the nginx-ingress controller")
|
||||
|
||||
Expect(os.WriteFile("vk-kubeconfig.yaml", virtualCluster.Kubeconfig, 0o644)).To(Succeed())
|
||||
|
||||
ingressNginx := "testdata/resources/ingress-nginx-v1.14.1.yaml"
|
||||
cmd := exec.Command("kubectl", "apply", "--kubeconfig", "vk-kubeconfig.yaml", "-f", ingressNginx)
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
fmt.Println("#### output", "\n", string(output))
|
||||
|
||||
Expect(err).NotTo(HaveOccurred(), string(output))
|
||||
})
|
||||
|
||||
expectJobToSucceed := func(client *kubernetes.Clientset, namespace string, label string) {
|
||||
GinkgoHelper()
|
||||
|
||||
Eventually(func(g Gomega) {
|
||||
listOpts := metav1.ListOptions{LabelSelector: "job-name=" + label}
|
||||
pods, err := client.CoreV1().Pods(namespace).List(context.Background(), listOpts)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
g.Expect(pods.Items).NotTo(BeEmpty())
|
||||
|
||||
g.Expect(pods.Items[0].Status.Phase).To(Equal(v1.PodSucceeded))
|
||||
}).
|
||||
WithPolling(time.Second).
|
||||
WithTimeout(2 * time.Minute).
|
||||
Should(Succeed())
|
||||
}
|
||||
|
||||
It("should complete the ingress-nginx-admission-create Job in the host cluster", func() {
|
||||
expectJobToSucceed(k8s, virtualCluster.Cluster.Namespace, "ingress-nginx-admission-create")
|
||||
})
|
||||
|
||||
It("should complete the ingress-nginx-admission-create Job in the virtual cluster", func() {
|
||||
expectJobToSucceed(virtualCluster.Client, "ingress-nginx", "ingress-nginx-admission-create")
|
||||
})
|
||||
|
||||
It("should complete the ingress-nginx-admission-patch Job in the host cluster", func() {
|
||||
expectJobToSucceed(k8s, virtualCluster.Cluster.Namespace, "ingress-nginx-admission-patch")
|
||||
})
|
||||
|
||||
It("should complete the ingress-nginx-admission-patch Job in the virtual cluster", func() {
|
||||
expectJobToSucceed(virtualCluster.Client, "ingress-nginx", "ingress-nginx-admission-patch")
|
||||
})
|
||||
|
||||
It("should run the ingress-nginx controller", func() {
|
||||
Eventually(func(g Gomega) {
|
||||
ctx := context.Background()
|
||||
deployment, err := virtualCluster.Client.AppsV1().Deployments("ingress-nginx").Get(ctx, "ingress-nginx-controller", metav1.GetOptions{})
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
desiredReplicas := *deployment.Spec.Replicas
|
||||
|
||||
status := deployment.Status
|
||||
g.Expect(status.ObservedGeneration).To(BeNumerically(">=", deployment.Generation))
|
||||
g.Expect(status.UpdatedReplicas).To(BeNumerically("==", desiredReplicas))
|
||||
g.Expect(status.AvailableReplicas).To(BeNumerically("==", desiredReplicas))
|
||||
}).
WithPolling(time.Second).
WithTimeout(2 * time.Minute).
Should(Succeed())
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("a cluster's status is tracked", Label("e2e"), Label(statusTestsLabel), func() {
|
||||
var _ = When("a cluster's status is tracked", Label(e2eTestLabel), Label(statusTestsLabel), func() {
|
||||
var (
|
||||
namespace *corev1.Namespace
|
||||
vcp *v1beta1.VirtualClusterPolicy
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("a shared mode cluster is created", Ordered, Label("e2e"), func() {
|
||||
var _ = When("a shared mode cluster is created", Ordered, Label(e2eTestLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
virtualConfigMap *corev1.ConfigMap
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = When("a shared mode cluster update its envs", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var _ = When("a shared mode cluster update its envs", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
ctx := context.Background()
|
||||
BeforeEach(func() {
|
||||
@@ -127,6 +127,11 @@ var _ = When("a shared mode cluster update its envs", Label("e2e"), Label(update
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
g.Expect(len(serverPods)).To(Equal(1))
|
||||
|
||||
serverPod := serverPods[0]
|
||||
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
|
||||
serverEnv1, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_1")
|
||||
g.Expect(ok).To(BeTrue())
|
||||
g.Expect(serverEnv1).To(Equal("upgraded"))
|
||||
@@ -145,6 +150,11 @@ var _ = When("a shared mode cluster update its envs", Label("e2e"), Label(update
|
||||
aPods := listAgentPods(ctx, virtualCluster)
|
||||
g.Expect(aPods).To(HaveLen(len(nodes.Items)))
|
||||
|
||||
agentPod := aPods[0]
|
||||
_, cond = pod.GetPodCondition(&agentPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
|
||||
agentEnv1, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_1")
|
||||
g.Expect(ok).To(BeTrue())
|
||||
g.Expect(agentEnv1).To(Equal("upgraded"))
|
||||
@@ -162,7 +172,7 @@ var _ = When("a shared mode cluster update its envs", Label("e2e"), Label(update
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a shared mode cluster update its server args", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var _ = When("a shared mode cluster update its server args", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
ctx := context.Background()
|
||||
BeforeEach(func() {
|
||||
@@ -213,6 +223,11 @@ var _ = When("a shared mode cluster update its server args", Label("e2e"), Label
|
||||
sPods := listServerPods(ctx, virtualCluster)
|
||||
g.Expect(len(sPods)).To(Equal(1))
|
||||
|
||||
serverPod := sPods[0]
|
||||
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
|
||||
g.Expect(isArgFound(&sPods[0], "--node-label=test_server=upgraded")).To(BeTrue())
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
@@ -221,7 +236,7 @@ var _ = When("a shared mode cluster update its server args", Label("e2e"), Label
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a virtual mode cluster update its envs", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var _ = When("a virtual mode cluster update its envs", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
ctx := context.Background()
|
||||
BeforeEach(func() {
|
||||
@@ -330,6 +345,11 @@ var _ = When("a virtual mode cluster update its envs", Label("e2e"), Label(updat
|
||||
serverPods := listServerPods(ctx, virtualCluster)
|
||||
g.Expect(len(serverPods)).To(Equal(1))
|
||||
|
||||
serverPod := serverPods[0]
|
||||
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
|
||||
serverEnv1, ok := getEnv(&serverPods[0], "TEST_SERVER_ENV_1")
|
||||
g.Expect(ok).To(BeTrue())
|
||||
g.Expect(serverEnv1).To(Equal("upgraded"))
|
||||
@@ -345,6 +365,11 @@ var _ = When("a virtual mode cluster update its envs", Label("e2e"), Label(updat
|
||||
aPods := listAgentPods(ctx, virtualCluster)
|
||||
g.Expect(len(aPods)).To(Equal(1))
|
||||
|
||||
agentPod := aPods[0]
|
||||
_, cond = pod.GetPodCondition(&agentPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
|
||||
agentEnv1, ok := getEnv(&aPods[0], "TEST_AGENT_ENV_1")
|
||||
g.Expect(ok).To(BeTrue())
|
||||
g.Expect(agentEnv1).To(Equal("upgraded"))
|
||||
@@ -362,7 +387,7 @@ var _ = When("a virtual mode cluster update its envs", Label("e2e"), Label(updat
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a virtual mode cluster update its server args", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var _ = When("a virtual mode cluster update its server args", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var virtualCluster *VirtualCluster
|
||||
ctx := context.Background()
|
||||
BeforeEach(func() {
|
||||
@@ -416,6 +441,11 @@ var _ = When("a virtual mode cluster update its server args", Label("e2e"), Labe
|
||||
sPods := listServerPods(ctx, virtualCluster)
|
||||
g.Expect(len(sPods)).To(Equal(1))
|
||||
|
||||
serverPod := sPods[0]
|
||||
_, cond := pod.GetPodCondition(&serverPod.Status, v1.PodReady)
|
||||
g.Expect(cond).NotTo(BeNil())
|
||||
g.Expect(cond.Status).To(BeEquivalentTo(metav1.ConditionTrue))
|
||||
|
||||
g.Expect(isArgFound(&sPods[0], "--node-label=test_server=upgraded")).To(BeTrue())
|
||||
}).
|
||||
WithPolling(time.Second * 2).
|
||||
@@ -424,7 +454,7 @@ var _ = When("a virtual mode cluster update its server args", Label("e2e"), Labe
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a shared mode cluster update its version", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var _ = When("a shared mode cluster update its version", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
nginxPod *v1.Pod
|
||||
@@ -510,7 +540,7 @@ var _ = When("a shared mode cluster update its version", Label("e2e"), Label(upd
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a virtual mode cluster update its version", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var _ = When("a virtual mode cluster update its version", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
nginxPod *v1.Pod
|
||||
@@ -611,7 +641,7 @@ var _ = When("a virtual mode cluster update its version", Label("e2e"), Label(up
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a shared mode cluster scales up servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var _ = When("a shared mode cluster scales up servers", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
nginxPod *v1.Pod
|
||||
@@ -696,7 +726,7 @@ var _ = When("a shared mode cluster scales up servers", Label("e2e"), Label(upda
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a shared mode cluster scales down servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var _ = When("a shared mode cluster scales down servers", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
nginxPod *v1.Pod
|
||||
@@ -783,7 +813,7 @@ var _ = When("a shared mode cluster scales down servers", Label("e2e"), Label(up
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a virtual mode cluster scales up servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var _ = When("a virtual mode cluster scales up servers", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
nginxPod *v1.Pod
|
||||
@@ -868,7 +898,7 @@ var _ = When("a virtual mode cluster scales up servers", Label("e2e"), Label(upd
|
||||
})
|
||||
})
|
||||
|
||||
var _ = When("a virtual mode cluster scales down servers", Label("e2e"), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var _ = When("a virtual mode cluster scales down servers", Label(e2eTestLabel), Label(updateTestsLabel), Label(slowTestsLabel), func() {
|
||||
var (
|
||||
virtualCluster *VirtualCluster
|
||||
nginxPod *v1.Pod
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"k8s.io/kubectl/pkg/scheme"
|
||||
"k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -35,6 +36,7 @@ type VirtualCluster struct {
|
||||
Cluster *v1beta1.Cluster
|
||||
RestConfig *rest.Config
|
||||
Client *kubernetes.Clientset
|
||||
Kubeconfig []byte
|
||||
}
|
||||
|
||||
func NewVirtualCluster() *VirtualCluster { // By default, create an ephemeral cluster
|
||||
@@ -53,7 +55,7 @@ func NewVirtualClusterWithType(persistenceType v1beta1.PersistenceMode) *Virtual
|
||||
|
||||
CreateCluster(cluster)
|
||||
|
||||
client, restConfig := NewVirtualK8sClientAndConfig(cluster)
|
||||
client, restConfig, kubeconfig := NewVirtualK8sClientAndKubeconfig(cluster)
|
||||
|
||||
By(fmt.Sprintf("Created virtual cluster %s/%s", cluster.Namespace, cluster.Name))
|
||||
|
||||
@@ -61,6 +63,7 @@ func NewVirtualClusterWithType(persistenceType v1beta1.PersistenceMode) *Virtual
|
||||
Cluster: cluster,
|
||||
RestConfig: restConfig,
|
||||
Client: client,
|
||||
Kubeconfig: kubeconfig,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,24 +116,18 @@ func DeleteNamespaces(names ...string) {
|
||||
defer wg.Done()
|
||||
defer GinkgoRecover()
|
||||
|
||||
deleteNamespace(name)
|
||||
By(fmt.Sprintf("Deleting namespace %s", name))
|
||||
|
||||
err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{
|
||||
GracePeriodSeconds: ptr.To[int64](0),
|
||||
})
|
||||
Expect(client.IgnoreNotFound(err)).To(Not(HaveOccurred()))
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func deleteNamespace(name string) {
|
||||
GinkgoHelper()
|
||||
|
||||
By(fmt.Sprintf("Deleting namespace %s", name))
|
||||
|
||||
err := k8s.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{
|
||||
GracePeriodSeconds: ptr.To[int64](0),
|
||||
})
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
}
|
||||
|
||||
func NewCluster(namespace string) *v1beta1.Cluster {
|
||||
return &v1beta1.Cluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -145,9 +142,6 @@ func NewCluster(namespace string) *v1beta1.Cluster {
|
||||
Persistence: v1beta1.PersistenceConfig{
|
||||
Type: v1beta1.EphemeralPersistenceMode,
|
||||
},
|
||||
ServerArgs: []string{
|
||||
"--disable-network-policy",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -256,6 +250,39 @@ func NewVirtualK8sClientAndConfig(cluster *v1beta1.Cluster) (*kubernetes.Clients
|
||||
return virtualK8sClient, restcfg
|
||||
}
|
||||
|
||||
// NewVirtualK8sClientAndKubeconfig returns a Kubernetes Clientset, a rest.Config and the raw kubeconfig for the virtual cluster
|
||||
func NewVirtualK8sClientAndKubeconfig(cluster *v1beta1.Cluster) (*kubernetes.Clientset, *rest.Config, []byte) {
|
||||
GinkgoHelper()
|
||||
|
||||
var (
|
||||
err error
|
||||
config *clientcmdapi.Config
|
||||
)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
Eventually(func() error {
|
||||
vKubeconfig := kubeconfig.New()
|
||||
kubeletAltName := fmt.Sprintf("k3k-%s-kubelet", cluster.Name)
|
||||
vKubeconfig.AltNames = certs.AddSANs([]string{hostIP, kubeletAltName})
|
||||
config, err = vKubeconfig.Generate(ctx, k8sClient, cluster, hostIP, 0)
|
||||
return err
|
||||
}).
|
||||
WithTimeout(time.Minute * 2).
|
||||
WithPolling(time.Second * 5).
|
||||
Should(BeNil())
|
||||
|
||||
configData, err := clientcmd.Write(*config)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
restcfg, err := clientcmd.RESTConfigFromKubeConfig(configData)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
virtualK8sClient, err := kubernetes.NewForConfig(restcfg)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
|
||||
return virtualK8sClient, restcfg, configData
|
||||
}
|
||||
|
||||
func (c *VirtualCluster) NewNginxPod(namespace string) (*v1.Pod, string) {
|
||||
GinkgoHelper()
|
||||
|
||||
|
||||
668
tests/testdata/resources/ingress-nginx-v1.14.1.yaml
vendored
Normal file
@@ -0,0 +1,668 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
name: ingress-nginx
|
||||
---
|
||||
apiVersion: v1
|
||||
automountServiceAccountToken: true
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: controller
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
app.kubernetes.io/version: 1.14.1
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
apiVersion: v1
|
||||
automountServiceAccountToken: true
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
app.kubernetes.io/version: 1.14.1
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: controller
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
app.kubernetes.io/version: 1.14.1
|
||||
name: ingress-nginx
|
||||
namespace: ingress-nginx
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
- pods
|
||||
- secrets
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingressclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resourceNames:
|
||||
- ingress-nginx-leader
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- discovery.k8s.io
|
||||
resources:
|
||||
- endpointslices
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- get
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: admission-webhook
|
||||
app.kubernetes.io/instance: ingress-nginx
|
||||
app.kubernetes.io/name: ingress-nginx
|
||||
app.kubernetes.io/part-of: ingress-nginx
|
||||
app.kubernetes.io/version: 1.14.1
|
||||
name: ingress-nginx-admission
|
||||
namespace: ingress-nginx
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - nodes
  - pods
  - secrets
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx-admission
rules:
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - validatingwebhookconfigurations
  verbs:
  - get
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx-admission
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx-admission
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx-controller
  namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - appProtocol: http
    name: http
    port: 80
    protocol: TCP
    targetPort: http
  - appProtocol: https
    name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: NodePort
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
spec:
  ports:
  - appProtocol: https
    name: https-webhook
    port: 443
    targetPort: webhook
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  minReadySeconds: 0
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/component: controller
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/name: ingress-nginx
  strategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/component: controller
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.14.1
    spec:
      automountServiceAccountToken: true
      containers:
      - args:
        - /nginx-ingress-controller
        - --election-id=ingress-nginx-leader
        - --controller-class=k8s.io/ingress-nginx
        - --ingress-class=nginx
        - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
        - --validating-webhook=:8443
        - --validating-webhook-certificate=/usr/local/certificates/cert
        - --validating-webhook-key=/usr/local/certificates/key
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: LD_PRELOAD
          value: /usr/local/lib/libmimalloc.so
        image: registry.k8s.io/ingress-nginx/controller:v1.14.1@sha256:f95a79b85fb93ac3de752c71a5c27d5ceae10a18b61904dec224c1c6a4581e47
        imagePullPolicy: IfNotPresent
        lifecycle:
          preStop:
            exec:
              command:
              - /wait-shutdown
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: controller
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          name: https
          protocol: TCP
        - containerPort: 8443
          name: webhook
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 100m
            memory: 90Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          readOnlyRootFilesystem: false
          runAsGroup: 82
          runAsNonRoot: true
          runAsUser: 101
          seccompProfile:
            type: RuntimeDefault
        volumeMounts:
        - mountPath: /usr/local/certificates/
          name: webhook-cert
          readOnly: true
      dnsPolicy: ClusterFirst
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
      - name: webhook-cert
        secret:
          secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.14.1
      name: ingress-nginx-admission-create
    spec:
      automountServiceAccountToken: true
      containers:
      - args:
        - create
        - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
        - --namespace=$(POD_NAMESPACE)
        - --secret-name=ingress-nginx-admission
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.5@sha256:03a00eb0e255e8a25fa49926c24cde0f7e12e8d072c445cdf5136ec78b546285
        imagePullPolicy: IfNotPresent
        name: create
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          readOnlyRootFilesystem: true
          runAsGroup: 65532
          runAsNonRoot: true
          runAsUser: 65532
          seccompProfile:
            type: RuntimeDefault
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
  ttlSecondsAfterFinished: 0
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.14.1
      name: ingress-nginx-admission-patch
    spec:
      automountServiceAccountToken: true
      containers:
      - args:
        - patch
        - --webhook-name=ingress-nginx-admission
        - --namespace=$(POD_NAMESPACE)
        - --patch-mutating=false
        - --secret-name=ingress-nginx-admission
        - --patch-failure-policy=Fail
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.5@sha256:03a00eb0e255e8a25fa49926c24cde0f7e12e8d072c445cdf5136ec78b546285
        imagePullPolicy: IfNotPresent
        name: patch
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          readOnlyRootFilesystem: true
          runAsGroup: 65532
          runAsNonRoot: true
          runAsUser: 65532
          seccompProfile:
            type: RuntimeDefault
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
  ttlSecondsAfterFinished: 0
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.14.1
  name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
  - v1
  clientConfig:
    service:
      name: ingress-nginx-controller-admission
      namespace: ingress-nginx
      path: /networking/v1/ingresses
      port: 443
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: validate.nginx.ingress.kubernetes.io
  rules:
  - apiGroups:
    - networking.k8s.io
    apiVersions:
    - v1
    operations:
    - CREATE
    - UPDATE
    resources:
    - ingresses
  sideEffects: None
@@ -42,6 +42,7 @@ import (
const (
	k3kNamespace = "k3k-system"

	e2eTestLabel = "e2e"
	slowTestsLabel = "slow"
	updateTestsLabel = "update"
	persistenceTestsLabel = "persistence"
@@ -409,7 +410,7 @@ func dumpK3kCoverageData(ctx context.Context, folder string) {
	}

	_, err = k8s.CoreV1().Pods(k3kNamespace).Create(ctx, tarPod, metav1.CreateOptions{})
	Expect(err).To(Not(HaveOccurred()))
	Expect(client.IgnoreAlreadyExists(err)).To(Not(HaveOccurred()))

	By("Waiting for tar pod to be ready")
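The second hunk above wraps the create error in `client.IgnoreAlreadyExists` (presumably controller-runtime's client package), so re-running the coverage-dump helper does not fail if the tar pod already exists. As a rough, self-contained sketch only (the helper name `ignoreAlreadyExists` is illustrative and not the repo's code), the wrapper behaves like this:

package e2e

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// ignoreAlreadyExists mirrors what controller-runtime's client.IgnoreAlreadyExists
// does: an "already exists" API error is treated as success, so creating the pod
// a second time is not a failure; any other error is returned unchanged.
func ignoreAlreadyExists(err error) error {
	if apierrors.IsAlreadyExists(err) {
		return nil
	}
	return err
}

With that behavior, `Expect(client.IgnoreAlreadyExists(err)).To(Not(HaveOccurred()))` only fails the Ginkgo spec on genuine creation errors.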