mirror of https://github.com/jpetazzo/container.training.git
🪐 Add Terraform config to provision clusters
This is a new provisioning mechanism. Right now, it can provision clusters on:

- Digital Ocean
- Linode
- Oracle Cloud
- Scaleway

Others should be relatively straightforward to add. Check the README in the prepare-tf subdirectory for details.
committed by Jerome Petazzoni
parent 8fed7a8adb
commit ae74d9069f
.gitignore (vendored, 14 lines changed)
@@ -6,13 +6,13 @@ prepare-vms/tags
 prepare-vms/infra
 prepare-vms/www

-prepare-scw/.terraform*
-prepare-scw/terraform.*
-prepare-scw/stage2/*.tf
-prepare-scw/stage2/kubeconfig.*
-prepare-scw/stage2/.terraform*
-prepare-scw/stage2/terraform.*
-prepare-scw/stage2/wildcard_dns.*
+prepare-tf/.terraform*
+prepare-tf/terraform.*
+prepare-tf/stage2/*.tf
+prepare-tf/stage2/kubeconfig.*
+prepare-tf/stage2/.terraform*
+prepare-tf/stage2/terraform.*
+prepare-tf/stage2/externalips.*

 slides/*.yml.html
 slides/autopilot/state.yaml
prepare-tf/README.md (new file, 76 lines)
@@ -0,0 +1,76 @@
This directory contains a Terraform configuration to deploy
a bunch of Kubernetes clusters on various cloud providers,
using their respective managed Kubernetes products.

To use it:

1. Select the provider you wish to use.

   Change the `source` attribute of the `module "clusters"` section.
   Check the content of the `modules` directory to see available choices.

   ```bash
   vim main.tf
   ```
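
   For instance, `source` points at the Scaleway module by default; to deploy
   on Linode instead, the section would look like this (sketch, other
   attributes unchanged):

   ```
   module "clusters" {
     source = "./modules/linode"
     # ...
   }
   ```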

2. Initialize the provider.

   ```bash
   terraform init
   ```

3. Configure provider authentication.

   - Digital Ocean: `export DIGITALOCEAN_ACCESS_TOKEN=...`
     (check `~/.config/doctl/config.yaml` for the token)
   - Linode: `export LINODE_TOKEN=...`
     (check `~/.config/linode-cli` for the token)
   - Oracle Cloud: it should use `~/.oci/config`
   - Scaleway: run `scw init`

4. Decide how many clusters, and how many nodes per cluster, you want.

   ```bash
   export TF_VAR_how_many_clusters=5
   export TF_VAR_min_nodes_per_pool=2
   # Optional (will enable the autoscaler when available)
   export TF_VAR_max_nodes_per_pool=4
   # Optional (will only work on some providers)
   export TF_VAR_enable_arm_pool=true
   ```

5. Provision the clusters.

   ```bash
   terraform apply
   ```

6. Perform second-stage provisioning.

   This will install an SSH server on the clusters.

   ```bash
   cd stage2
   terraform init
   terraform apply
   ```

7. Obtain cluster connection information.

   The following command shows connection information, one cluster per line,
   ready to copy-paste into a shared document or spreadsheet.

   ```bash
   terraform output -json | jq -r 'to_entries[].value.value'
   ```
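
   Each line comes from the `ssh_...` outputs generated in stage2, so it
   looks like this (hypothetical address and password):

   ```
   ssh -l k8s -p 32222 203.0.113.10 # password=ab12cd
   ```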

8. Destroy the clusters.

   ```bash
   cd ..
   terraform destroy
   ```

9. Clean up stage2.

   ```bash
   rm stage2/terraform.tfstate*
   ```
prepare-tf/locals.tf (new file, 16 lines)
@@ -0,0 +1,16 @@
resource "random_string" "_" {
  length  = 5
  special = false
  upper   = false
}

resource "time_static" "_" {}

locals {
  tag = format("tf-%s-%s", formatdate("YYYY-MM-DD-hh-mm", time_static._.rfc3339), random_string._.result)
  # Common tags to be assigned to all resources
  common_tags = [
    "created-by=terraform",
    "tag=${local.tag}"
  ]
}
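
The `tag` local combines the run timestamp with the random suffix, yielding
values like `tf-2021-10-29-14-30-ab3xz` (hypothetical); since `common_tags`
is assigned to all resources, everything created by a given run can be
identified later.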
prepare-tf/main.tf (new file, 46 lines)
@@ -0,0 +1,46 @@
module "clusters" {
  source             = "./modules/scaleway"
  for_each           = local.clusters
  cluster_name       = each.value.cluster_name
  min_nodes_per_pool = var.min_nodes_per_pool
  max_nodes_per_pool = var.max_nodes_per_pool
  enable_arm_pool    = var.enable_arm_pool
  node_size          = var.node_size
  common_tags        = local.common_tags
}

locals {
  clusters = {
    for i in range(101, 101 + var.how_many_clusters) :
    i => {
      cluster_name        = format("%s-%03d", local.tag, i)
      kubeconfig_path     = format("./stage2/kubeconfig.%03d", i)
      dashdash_kubeconfig = format("--kubeconfig=./stage2/kubeconfig.%03d", i)
      externalips_path    = format("./stage2/externalips.%03d", i)
    }
  }
}

resource "local_file" "stage2" {
  filename = "./stage2/main.tf"
  content = templatefile(
    "./stage2.tmpl",
    { clusters = local.clusters }
  )
}

resource "local_file" "kubeconfig" {
  for_each = local.clusters
  filename = each.value.kubeconfig_path
  content  = module.clusters[each.key].kubeconfig

  # Wait until the cluster reports at least one node, then until all nodes
  # are Ready; then store the nodes' external IPs for use by stage2.
  provisioner "local-exec" {
    command = <<-EOT
      kubectl ${each.value.dashdash_kubeconfig} get nodes --watch \
      | grep --silent --line-buffered . \
      && kubectl ${each.value.dashdash_kubeconfig} wait node --for=condition=Ready --all --timeout=10m \
      && kubectl ${each.value.dashdash_kubeconfig} get nodes \
         -o 'jsonpath={.items[*].status.addresses[?(@.type=="ExternalIP")].address}' > ${each.value.externalips_path}
    EOT
  }
}
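
In other words, a top-level `terraform apply` renders `stage2.tmpl` into
`./stage2/main.tf` and writes one kubeconfig file per cluster next to it;
the `stage2` configuration is then initialized and applied separately
(step 6 of the README).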
prepare-tf/modules/digitalocean/main.tf (new file, 16 lines)
@@ -0,0 +1,16 @@
resource "digitalocean_kubernetes_cluster" "_" {
  name    = var.cluster_name
  tags    = local.common_tags
  region  = var.region
  version = var.k8s_version

  node_pool {
    name       = "dok-x86"
    tags       = local.common_tags
    size       = local.node_type
    auto_scale = true
    min_nodes  = var.min_nodes_per_pool
    max_nodes  = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
  }

}
prepare-tf/modules/digitalocean/outputs.tf (new file, 7 lines)
@@ -0,0 +1,7 @@
output "kubeconfig" {
  value = digitalocean_kubernetes_cluster._.kube_config.0.raw_config
}

output "cluster_id" {
  value = digitalocean_kubernetes_cluster._.id
}
prepare-tf/modules/digitalocean/providers.tf (new file, 8 lines)
@@ -0,0 +1,8 @@
terraform {
  required_providers {
    digitalocean = {
      source  = "digitalocean/digitalocean"
      version = "2.12.1"
    }
  }
}
prepare-tf/modules/digitalocean/variables.tf (new file, 59 lines)
@@ -0,0 +1,59 @@
variable "cluster_name" {
  type    = string
  default = "deployed-with-terraform"
}

variable "common_tags" {
  type    = list(string)
  default = []
}

# Digital Ocean tags cannot contain "=", so swap it for "-".
locals {
  common_tags = [for tag in var.common_tags : replace(tag, "=", "-")]
}

variable "node_size" {
  type    = string
  default = "M"
}

variable "min_nodes_per_pool" {
  type    = number
  default = 2
}

variable "max_nodes_per_pool" {
  type    = number
  default = 5
}

# FIXME
variable "enable_arm_pool" {
  type    = bool
  default = false
}

variable "node_types" {
  type = map(string)
  default = {
    "S" = "s-1vcpu-2gb"
    "M" = "s-2vcpu-4gb"
    "L" = "s-4vcpu-8gb"
  }
}

locals {
  node_type = var.node_types[var.node_size]
}

variable "region" {
  type    = string
  default = "ams3"
}

# To view supported versions, run:
# doctl kubernetes options versions -o json | jq -r .[].slug
variable "k8s_version" {
  type    = string
  default = "1.21.3-do.0"
}
prepare-tf/modules/linode/main.tf (new file, 17 lines)
@@ -0,0 +1,17 @@
resource "linode_lke_cluster" "_" {
  label       = var.cluster_name
  tags        = var.common_tags
  region      = var.region
  k8s_version = var.k8s_version

  pool {
    type  = local.node_type
    count = var.min_nodes_per_pool
    autoscaler {
      min = var.min_nodes_per_pool
      max = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
    }
  }

}
prepare-tf/modules/linode/outputs.tf (new file, 7 lines)
@@ -0,0 +1,7 @@
output "kubeconfig" {
  value = base64decode(linode_lke_cluster._.kubeconfig)
}

output "cluster_id" {
  value = linode_lke_cluster._.id
}
prepare-tf/modules/linode/providers.tf (new file, 8 lines)
@@ -0,0 +1,8 @@
terraform {
  required_providers {
    linode = {
      source  = "linode/linode"
      version = "1.22.0"
    }
  }
}
prepare-tf/modules/linode/variables.tf (new file, 55 lines)
@@ -0,0 +1,55 @@
variable "cluster_name" {
  type    = string
  default = "deployed-with-terraform"
}

variable "common_tags" {
  type    = list(string)
  default = []
}

variable "node_size" {
  type    = string
  default = "M"
}

variable "min_nodes_per_pool" {
  type    = number
  default = 2
}

variable "max_nodes_per_pool" {
  type    = number
  default = 5
}

# FIXME
variable "enable_arm_pool" {
  type    = bool
  default = false
}

variable "node_types" {
  type = map(string)
  default = {
    "S" = "g6-standard-1"
    "M" = "g6-standard-2"
    "L" = "g6-standard-4"
  }
}

locals {
  node_type = var.node_types[var.node_size]
}

variable "region" {
  type    = string
  default = "eu-central"
}

# To view supported versions, run:
# linode-cli lke versions-list --json | jq -r .[].id
variable "k8s_version" {
  type    = string
  default = "1.21"
}
prepare-tf/modules/oraclecloud/generate-tfvars.sh (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/sh
grep = ~/.oci/config | tr "=" " " | while read key value; do
  echo $key=\"$value\"
done > terraform.tfvars
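
This turns each `key=value` line of `~/.oci/config` into a quoted Terraform
assignment; e.g. a (hypothetical) line `region=eu-frankfurt-1` becomes
`region="eu-frankfurt-1"` in `terraform.tfvars`.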
prepare-tf/modules/oraclecloud/main.tf (new file, 59 lines)
@@ -0,0 +1,59 @@
resource "oci_identity_compartment" "_" {
  name        = var.cluster_name
  description = var.cluster_name
}

locals {
  compartment_id = oci_identity_compartment._.id
}

data "oci_identity_availability_domains" "_" {
  compartment_id = local.compartment_id
}

data "oci_core_images" "_" {
  for_each                 = local.pools
  compartment_id           = local.compartment_id
  operating_system         = "Oracle Linux"
  operating_system_version = "7.9"
  shape                    = each.value.shape
}

resource "oci_containerengine_cluster" "_" {
  compartment_id     = local.compartment_id
  kubernetes_version = var.k8s_version
  name               = "oke-tf"
  vcn_id             = oci_core_vcn._.id
  options {
    service_lb_subnet_ids = [oci_core_subnet.loadbalancers.id]
  }
  endpoint_config {
    is_public_ip_enabled = true
    subnet_id            = oci_core_subnet.controlplane.id
  }
}

resource "oci_containerengine_node_pool" "_" {
  for_each           = local.pools
  cluster_id         = oci_containerengine_cluster._.id
  compartment_id     = local.compartment_id
  kubernetes_version = var.k8s_version
  name               = each.key
  node_shape         = each.value.shape
  node_shape_config {
    memory_in_gbs = local.node_type.memory_in_gbs
    ocpus         = local.node_type.ocpus
  }
  node_config_details {
    size = var.min_nodes_per_pool
    placement_configs {
      availability_domain = data.oci_identity_availability_domains._.availability_domains[0].name
      subnet_id           = oci_core_subnet.nodes.id
    }
  }
  node_source_details {
    image_id    = data.oci_core_images._[each.key].images[0].id
    source_type = "image"
  }
  ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDxLXAXB3CUztORPd+SCEcxhSGakSqecCgtCQTK3owuOKnG9LAYsXFDc7DLGoWzhnYeU9fVkKuMh6aDXYCHJmjSyK5V7f+VVxrH/6kf2VrIK+bGglHbPxNVLS1/UsSEzdZdP0vFwoBoGjAFNFEcn0NXKAvtNlMsyakvjfrpuPIFrb7xkrZHhvezNKrG3Sx6lQ2RaSFFGJSTPOWQg/q8DsCv0D1T7RaR1oHrrb6KKQAWjGYoU75/zNN6x7nD/ITQe7JGNPS3JbqXqXj7WtM4Wm0FHF5CthEKecbyxCFGxhpNBwXN2GzNkItQkeans+pn6/g6lstWIT/ugddoxW3CvQt1 /home/jp/.ssh/id_rsa"
}
prepare-tf/modules/oraclecloud/network.tf (new file, 81 lines)
@@ -0,0 +1,81 @@
resource "oci_core_vcn" "_" {
  compartment_id = local.compartment_id
  cidr_block     = "10.0.0.0/16"
  display_name   = "tf-vcn-${var.cluster_name}"
}

#
# On OCI, you can have either "public" or "private" subnets.
# In both cases, instances get addresses in the VCN CIDR block;
# but instances in "public" subnets also get a public address.
#
# Then, to enable communication to the outside world, you need:
# - for public subnets, an "internet gateway"
#   (will allow inbound and outbound traffic)
# - for private subnets, a "NAT gateway"
#   (will only allow outbound traffic)
# - optionally, for private subnets, a "service gateway"
#   (to access other OCI services, e.g. object store)
#
# In this configuration, we use public subnets, and since we
# need outside access, we add an internet gateway.
#
# Note that the default routing table in a VCN is empty, so we
# add the internet gateway to the default routing table.
# Similarly, the default security group in a VCN blocks almost
# everything, so we add a blanket rule in that security group.
#

resource "oci_core_internet_gateway" "_" {
  compartment_id = local.compartment_id
  display_name   = "tf-igw"
  vcn_id         = oci_core_vcn._.id
}

resource "oci_core_default_route_table" "_" {
  manage_default_resource_id = oci_core_vcn._.default_route_table_id
  route_rules {
    destination       = "0.0.0.0/0"
    destination_type  = "CIDR_BLOCK"
    network_entity_id = oci_core_internet_gateway._.id
  }
}

resource "oci_core_default_security_list" "_" {
  manage_default_resource_id = oci_core_vcn._.default_security_list_id
  ingress_security_rules {
    protocol = "all"
    source   = "0.0.0.0/0"
  }
  egress_security_rules {
    protocol    = "all"
    destination = "0.0.0.0/0"
  }
}

resource "oci_core_subnet" "controlplane" {
  compartment_id    = local.compartment_id
  cidr_block        = "10.0.254.0/24"
  vcn_id            = oci_core_vcn._.id
  display_name      = "tf-subnet-controlplane"
  route_table_id    = oci_core_default_route_table._.id
  security_list_ids = [oci_core_default_security_list._.id]
}

resource "oci_core_subnet" "nodes" {
  compartment_id    = local.compartment_id
  cidr_block        = "10.0.0.0/20"
  vcn_id            = oci_core_vcn._.id
  display_name      = "tf-subnet-nodes"
  route_table_id    = oci_core_default_route_table._.id
  security_list_ids = [oci_core_default_security_list._.id]
}

resource "oci_core_subnet" "loadbalancers" {
  compartment_id    = local.compartment_id
  cidr_block        = "10.0.96.0/20"
  vcn_id            = oci_core_vcn._.id
  display_name      = "tf-subnet-loadbalancers"
  route_table_id    = oci_core_default_route_table._.id
  security_list_ids = [oci_core_default_security_list._.id]
}
prepare-tf/modules/oraclecloud/outputs.tf (new file, 11 lines)
@@ -0,0 +1,11 @@
data "oci_containerengine_cluster_kube_config" "_" {
  cluster_id = oci_containerengine_cluster._.id
}

output "kubeconfig" {
  value = data.oci_containerengine_cluster_kube_config._.content
}

output "cluster_id" {
  value = oci_containerengine_cluster._.id
}
prepare-tf/modules/oraclecloud/provider.tf (new file, 8 lines)
@@ -0,0 +1,8 @@
terraform {
  required_providers {
    oci = {
      source  = "hashicorp/oci"
      version = "4.48.0"
    }
  }
}
prepare-tf/modules/oraclecloud/variables.tf (new file, 78 lines)
@@ -0,0 +1,78 @@
variable "cluster_name" {
  type    = string
  default = "deployed-with-terraform"
}

variable "common_tags" {
  type    = list(string)
  default = []
}

variable "node_size" {
  type    = string
  default = "M"
}

variable "min_nodes_per_pool" {
  type    = number
  default = 2
}

variable "max_nodes_per_pool" {
  type    = number
  default = 5
}

variable "enable_arm_pool" {
  type    = bool
  default = true
}

locals {
  arm_pool = {
    shape = "VM.Standard.A1.Flex"
  }
  x86_pool = {
    shape = "VM.Standard.E4.Flex"
  }
  # Each cluster gets an x86 pool, plus an ARM pool when enable_arm_pool is set.
  pools = var.enable_arm_pool ? {
    "oke-arm" = local.arm_pool
    "oke-x86" = local.x86_pool
  } : {
    "oke-x86" = local.x86_pool
  }
}

output "pool" {
  value = local.pools
}

variable "node_types" {
  # FIXME put better typing here
  type = map(map(number))
  default = {
    "S" = {
      memory_in_gbs = 2
      ocpus         = 1
    }
    "M" = {
      memory_in_gbs = 4
      ocpus         = 1
    }
    "L" = {
      memory_in_gbs = 8
      ocpus         = 2
    }
  }
}

locals {
  node_type = var.node_types[var.node_size]
}

# To view supported versions, run:
# oci ce cluster-options get --cluster-option-id all | jq -r '.data["kubernetes-versions"][]'
variable "k8s_version" {
  type    = string
  default = "v1.20.11"
}
prepare-tf/modules/scaleway/main.tf (new file, 18 lines)
@@ -0,0 +1,18 @@
resource "scaleway_k8s_cluster" "_" {
  name    = var.cluster_name
  tags    = var.common_tags
  version = var.k8s_version
  cni     = var.cni
}

resource "scaleway_k8s_pool" "_" {
  cluster_id  = scaleway_k8s_cluster._.id
  name        = "scw-x86"
  tags        = var.common_tags
  node_type   = local.node_type
  size        = var.min_nodes_per_pool
  min_size    = var.min_nodes_per_pool
  max_size    = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
  autoscaling = true
  autohealing = true
}
prepare-tf/modules/scaleway/outputs.tf (new file, 7 lines)
@@ -0,0 +1,7 @@
output "kubeconfig" {
  value = scaleway_k8s_cluster._.kubeconfig.0.config_file
}

output "cluster_id" {
  value = scaleway_k8s_cluster._.id
}
prepare-tf/modules/scaleway/providers.tf (new file, 8 lines)
@@ -0,0 +1,8 @@
terraform {
  required_providers {
    scaleway = {
      source  = "scaleway/scaleway"
      version = "2.1.0"
    }
  }
}
prepare-tf/modules/scaleway/variables.tf (new file, 55 lines)
@@ -0,0 +1,55 @@
variable "cluster_name" {
  type    = string
  default = "deployed-with-terraform"
}

variable "common_tags" {
  type    = list(string)
  default = []
}

variable "node_size" {
  type    = string
  default = "M"
}

variable "min_nodes_per_pool" {
  type    = number
  default = 2
}

variable "max_nodes_per_pool" {
  type    = number
  default = 5
}

# FIXME
variable "enable_arm_pool" {
  type    = bool
  default = false
}

variable "node_types" {
  type = map(string)
  default = {
    "S" = "DEV1-S"
    "M" = "DEV1-M"
    "L" = "DEV1-L"
  }
}

locals {
  node_type = var.node_types[var.node_size]
}

variable "cni" {
  type    = string
  default = "cilium"
}

# See supported versions with:
# scw k8s version list -o json | jq -r .[].name
variable "k8s_version" {
  type    = string
  default = "1.22.2"
}
prepare-tf/providers.tf (new file, 3 lines)
@@ -0,0 +1,3 @@
terraform {
  required_version = ">= 1.0"
}
prepare-tf/stage2.tmpl (new file, 134 lines)
@@ -0,0 +1,134 @@
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.0.3"
    }
  }
}

%{ for index, cluster in clusters ~}

provider "kubernetes" {
  alias       = "cluster_${index}"
  config_path = "./kubeconfig.${index}"
}

resource "kubernetes_namespace" "sshpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name = "sshpod"
  }
}

resource "kubernetes_deployment" "sshpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name      = "sshpod"
    namespace = kubernetes_namespace.sshpod_${index}.metadata.0.name
  }
  spec {
    selector {
      match_labels = {
        app = "sshpod"
      }
    }
    template {
      metadata {
        labels = {
          app = "sshpod"
        }
      }
      spec {
        service_account_name = "sshpod"
        container {
          image = "jpetazzo/sshpod"
          name  = "sshpod"
          env {
            name  = "PASSWORD"
            value = random_string.sshpod_${index}.result
          }
          lifecycle {
            post_start {
              exec {
                command = [ "sh", "-c", "curl http://myip.enix.org/REMOTE_ADDR > /etc/HOSTIP || true" ]
              }
            }
          }
          resources {
            limits = {
              cpu    = "2"
              memory = "100M"
            }
            requests = {
              cpu    = "100m"
              memory = "100M"
            }
          }
        }
      }
    }
  }
}

resource "kubernetes_service" "sshpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name      = "sshpod"
    namespace = kubernetes_namespace.sshpod_${index}.metadata.0.name
  }
  spec {
    selector = {
      app = "sshpod"
    }
    port {
      port        = 22
      target_port = 22
      node_port   = 32222
    }
    type = "NodePort"
  }
}

resource "kubernetes_service_account" "sshpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name      = "sshpod"
    namespace = kubernetes_namespace.sshpod_${index}.metadata.0.name
  }
}

resource "kubernetes_cluster_role_binding" "sshpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name = "sshpod"
  }
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "cluster-admin"
  }
  subject {
    kind      = "ServiceAccount"
    name      = "sshpod"
    namespace = "sshpod"
  }
}

resource "random_string" "sshpod_${index}" {
  length  = 6
  special = false
  upper   = false
}

output "ssh_${index}" {
  value = format(
    "ssh -l %s -p %s %s # password=%s",
    "k8s",
    "32222",
    split(" ", file("./externalips.${index}"))[0],
    random_string.sshpod_${index}.result
  )
}

%{ endfor ~}
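
For each cluster, this template emits an aliased kubernetes provider pointing
at the matching kubeconfig, plus the sshpod namespace, deployment, service
account, cluster role binding, NodePort service, and random password. Since
clusters are numbered from 101, the generated main.tf contains blocks such as
`kubernetes_deployment.sshpod_101`, one set per cluster.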
prepare-tf/variables.tf (new file, 28 lines)
@@ -0,0 +1,28 @@
variable "how_many_clusters" {
  type    = number
  default = 2
}

variable "node_size" {
  type    = string
  default = "M"
  # Can be S, M, L.
  # S = 2 GB RAM
  # M = 4 GB RAM
  # L = 8 GB RAM
}

variable "min_nodes_per_pool" {
  type    = number
  default = 1
}

variable "max_nodes_per_pool" {
  type    = number
  default = 0
}

variable "enable_arm_pool" {
  type    = bool
  default = true
}
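
Each of these can be overridden from the environment with the matching
`TF_VAR_...` variable (e.g. `TF_VAR_how_many_clusters=5`), as shown in
step 4 of the README.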