Mirror of https://github.com/rancher/k3k.git (synced 2026-02-14 10:00:15 +00:00)
Dev doc update (#611)

* update development.md
* fix tests
* fix cli test

Makefile (2 changed lines)

@@ -111,7 +111,7 @@ lint: ## Find any linting issues in the project
	$(GOLANGCI_LINT) run --timeout=5m

.PHONY: fmt
fmt: ## Find any linting issues in the project
fmt: ## Format source files in the project
	$(GOLANGCI_LINT) fmt ./...

.PHONY: validate

@@ -11,6 +11,18 @@ To start developing K3k you will need:
- A running Kubernetes cluster

> [!IMPORTANT]
>
> Virtual clusters in shared mode need to have a configured storage provider, unless the `--persistence-type ephemeral` flag is used.
>
> To install the [`local-path-provisioner`](https://github.com/rancher/local-path-provisioner) and set it as the default storage class you can run:
>
> ```
> kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.34/deploy/local-path-storage.yaml
> kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
> ```
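
For example, you can verify which storage class is the default, or skip persistent storage for a throwaway cluster. The second command is only a sketch, assuming the `--persistence-type` flag is accepted by `k3kcli cluster create`:

```shell
# The default storage class is marked "(default)" in the output
kubectl get storageclass

# Hypothetical invocation: create a shared-mode cluster without persistent storage
k3kcli cluster create --persistence-type ephemeral mycluster
```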

### TLDR

```shell

@@ -43,9 +55,13 @@ To see all the available Make commands you can run `make help`, i.e:
test-controller           Run the controller tests (pkg/controller)
test-kubelet-controller   Run the controller tests (pkg/controller)
test-e2e                  Run the e2e tests
test-cli                  Run the cli tests
generate                  Generate the CRDs specs
docs                      Build the CRDs and CLI docs
docs-crds                 Build the CRDs docs
docs-cli                  Build the CLI docs
lint                      Find any linting issues in the project
fmt                       Format source files in the project
validate                  Validate the project checking for any dependency or doc mismatch
install                   Install K3k with Helm on the targeted Kubernetes cluster
help                      Show this help.

@@ -80,7 +96,20 @@ Once you have your images available you can install K3k with the `make install`

## Tests

To run the tests you can just run `make test`, or one of the other available "sub-tests" targets (`test-unit`, `test-controller`, `test-e2e`).
To run the tests you can just run `make test`, or one of the other available "sub-tests" targets (`test-unit`, `test-controller`, `test-e2e`, `test-cli`).
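
For example, to run the whole suite or just one group, using the targets listed in `make help` above:

```shell
# Run all the tests
make test

# ...or only the unit tests
make test-unit
```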

When running the tests, the namespaces used by them are cleaned up afterwards. If you want to keep them around for debugging you can set the `KEEP_NAMESPACES` environment variable, i.e.:

```
KEEP_NAMESPACES=true make test-e2e
```

The e2e and cli tests run against the cluster configured in your `KUBECONFIG` environment variable. Running the tests with the `K3K_DOCKER_INSTALL` environment variable set will use `testcontainers` instead:

```
K3K_DOCKER_INSTALL=true make test-e2e
```

We use [Ginkgo](https://onsi.github.io/ginkgo/) and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers.
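
For instance, to iterate only on the controllers you can run their dedicated suites. This is a sketch using the Make targets listed above, assuming they take care of provisioning the envtest binaries:

```shell
# Run the controller and kubelet controller suites against an envtest control plane
make test-controller
make test-kubelet-controller
```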
@@ -153,3 +182,7 @@ Last thing to do is to get the kubeconfig to connect to the virtual cluster we'v

```bash
k3kcli kubeconfig generate --name mycluster --namespace k3k-mycluster --kubeconfig-server localhost:30001
```
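
You can then point `kubectl` at the generated kubeconfig. The file name below is only an assumption; `k3kcli` prints the actual path it writes:

```shell
# Hypothetical file name; check the k3kcli output for the real path
export KUBECONFIG=$PWD/mycluster-kubeconfig.yaml
kubectl get nodes
```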

> [!IMPORTANT]
> Because of a technical limitation, it is not possible to create virtual clusters in `virtual` mode with K3d, or any other dockerized environment (Kind, Minikube).

@@ -41,7 +41,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
It("can get the version", func() {
stdout, _, err := K3kcli("--version")
Expect(err).To(Not(HaveOccurred()))
Expect(stdout).To(ContainSubstring("k3kcli version v"))
Expect(stdout).To(ContainSubstring("k3kcli version "))
})

When("trying the cluster commands", func() {

@@ -53,13 +53,14 @@ var _ = When("using the k3kcli", Label("cli"), func() {
)

clusterName := "cluster-" + rand.String(5)
clusterNamespace := "k3k-" + clusterName
namespace := NewNamespace()
clusterNamespace := namespace.Name

DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)
DeleteNamespaces(namespace.Name)
})

_, stderr, err = K3kcli("cluster", "create", clusterName)
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))

@@ -68,7 +69,7 @@ var _ = When("using the k3kcli", Label("cli"), func() {
Expect(stderr).To(BeEmpty())
Expect(stdout).To(ContainSubstring(clusterNamespace))

_, stderr, err = K3kcli("cluster", "delete", clusterName)
_, stderr, err = K3kcli("cluster", "delete", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring(`Deleting '%s' cluster in namespace '%s'`, clusterName, clusterNamespace))

@@ -90,7 +91,8 @@ var _ = When("using the k3kcli", Label("cli"), func() {
)

clusterName := "cluster-" + rand.String(5)
clusterNamespace := "k3k-" + clusterName
namespace := NewNamespace()
clusterNamespace := namespace.Name

DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)

@@ -138,10 +140,8 @@ var _ = When("using the k3kcli", Label("cli"), func() {
err error
)

namespaceName := "ns-" + rand.String(5)

_, _, err = Kubectl("create", "namespace", namespaceName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
namespace := NewNamespace()
namespaceName := namespace.Name

DeferCleanup(func() {
DeleteNamespaces(namespaceName)

@@ -209,21 +209,22 @@ var _ = When("using the k3kcli", Label("cli"), func() {
)

clusterName := "cluster-" + rand.String(5)
clusterNamespace := "k3k-" + clusterName
namespace := NewNamespace()
clusterNamespace := namespace.Name

DeferCleanup(func() {
DeleteNamespaces(clusterNamespace)
})

_, stderr, err = K3kcli("cluster", "create", clusterName)
_, stderr, err = K3kcli("cluster", "create", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))

_, stderr, err = K3kcli("kubeconfig", "generate", "--name", clusterName)
_, stderr, err = K3kcli("kubeconfig", "generate", "--namespace", clusterNamespace, "--name", clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring("You can start using the cluster"))

_, stderr, err = K3kcli("cluster", "delete", clusterName)
_, stderr, err = K3kcli("cluster", "delete", "--namespace", clusterNamespace, clusterName)
Expect(err).To(Not(HaveOccurred()), string(stderr))
Expect(stderr).To(ContainSubstring(`Deleting '%s' cluster in namespace '%s'`, clusterName, clusterNamespace))
})