🏭️ Refactor prepare-tf

- fix tags so that they don't contain '='
- install metrics-server only if necessary
- set a maximum size to GKE node pool
- change tags to be shorter
This commit is contained in:
Jérôme Petazzoni
2022-01-09 20:51:50 +01:00
parent 7eb90b9d6f
commit 8de9e6e868
11 changed files with 58 additions and 22 deletions

View File

@@ -1,5 +1,6 @@
resource "random_string" "_" {
length = 5
length = 4
number = false
special = false
upper = false
}
@@ -7,10 +8,12 @@ resource "random_string" "_" {
resource "time_static" "_" {}
locals {
tag = format("tf-%s-%s", formatdate("YYYY-MM-DD-hh-mm", time_static._.rfc3339), random_string._.result)
timestamp = formatdate("YYYY-MM-DD-hh-mm", time_static._.rfc3339)
tag = random_string._.result
# Common tags to be assigned to all resources
common_tags = [
"created-by=terraform",
"tag=${local.tag}"
"created-by-terraform",
format("created-at-%s", local.timestamp),
format("created-for-%s", local.tag)
]
}

View File

@@ -1,5 +1,5 @@
module "clusters" {
source = "./modules/scaleway"
source = "./modules/linode"
for_each = local.clusters
cluster_name = each.value.cluster_name
min_nodes_per_pool = var.min_nodes_per_pool
@@ -13,10 +13,10 @@ locals {
clusters = {
for i in range(101, 101 + var.how_many_clusters) :
i => {
cluster_name = format("%s-%03d", local.tag, i)
kubeconfig_path = format("./stage2/kubeconfig.%03d", i)
#dashdash_kubeconfig = format("--kubeconfig=./stage2/kubeconfig.%03d", i)
cluster_name = format("%s-%03d", local.tag, i)
kubeconfig_path = format("./stage2/kubeconfig.%03d", i)
externalips_path = format("./stage2/externalips.%03d", i)
flags_path = format("./stage2/flags.%03d", i)
}
}
}
@@ -30,6 +30,15 @@ resource "local_file" "stage2" {
)
}
resource "local_file" "flags" {
for_each = local.clusters
filename = each.value.flags_path
file_permission = "0600"
content = <<-EOT
has_metrics_server: ${module.clusters[each.key].has_metrics_server}
EOT
}
resource "local_file" "kubeconfig" {
for_each = local.clusters
filename = each.value.kubeconfig_path

View File

@@ -5,7 +5,7 @@ resource "digitalocean_kubernetes_cluster" "_" {
version = var.k8s_version
node_pool {
name = "dok-x86"
name = "x86"
tags = local.common_tags
size = local.node_type
auto_scale = true

View File

@@ -3,14 +3,18 @@ resource "google_container_cluster" "_" {
project = "prepare-tf"
location = "europe-north1-a"
min_master_version = var.k8s_version
initial_node_count = var.min_nodes_per_pool
#max_size = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
#autoscaling = true
#autohealing = true
node_config {
tags = var.common_tags
machine_type = local.node_type
node_pool {
name = "x86"
node_config {
tags = var.common_tags
machine_type = local.node_type
}
initial_node_count = var.min_nodes_per_pool
autoscaling {
min_node_count = var.min_nodes_per_pool
max_node_count = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
}
}
# This is not strictly necessary.
@@ -23,3 +27,4 @@ resource "google_container_cluster" "_" {
}
}
}

View File

@@ -29,3 +29,7 @@ output "kubeconfig" {
output "cluster_id" {
value = google_container_cluster._.id
}
output "has_metrics_server" {
value = true
}

View File

@@ -5,3 +5,7 @@ output "kubeconfig" {
output "cluster_id" {
value = linode_lke_cluster._.id
}
output "has_metrics_server" {
value = false
}

View File

@@ -9,3 +9,7 @@ output "kubeconfig" {
output "cluster_id" {
value = oci_containerengine_cluster._.id
}
output "has_metrics_server" {
value = false
}

View File

@@ -8,7 +8,7 @@ resource "scaleway_k8s_cluster" "_" {
resource "scaleway_k8s_pool" "_" {
cluster_id = scaleway_k8s_cluster._.id
name = "scw-x86"
name = "x86"
tags = var.common_tags
node_type = local.node_type
size = var.min_nodes_per_pool

View File

@@ -5,3 +5,7 @@ output "kubeconfig" {
output "cluster_id" {
value = scaleway_k8s_cluster._.id
}
output "has_metrics_server" {
value = var.k8s_version >= 1.22
}

View File

@@ -140,9 +140,10 @@ provider "helm" {
}
resource "helm_release" "metrics_server_${index}" {
# Uncomment this for clusters that already install metrics-server
# (e.g. Scaleway Kapsule 1.22+, GKE...)
#count = 0
# Some providers pre-install metrics-server.
# Some don't. Let's install metrics-server,
# but only if it's not already installed.
count = yamldecode(file("./flags.${index}"))["has_metrics_server"] ? 0 : 1
provider = helm.cluster_${index}
repository = "https://charts.bitnami.com/bitnami"
chart = "metrics-server"

View File

@@ -1,12 +1,14 @@
variable "how_many_clusters" {
type = number
default = 2
default = 1
}
variable "node_size" {
type = string
default = "M"
# Can be S, M, L.
# We map these values to different specific instance types for each provider,
# but the idea is that they should correspond to the following sizes:
# S = 2 GB RAM
# M = 4 GB RAM
# L = 8 GB RAM
@@ -24,5 +26,5 @@ variable "max_nodes_per_pool" {
variable "enable_arm_pool" {
type = bool
default = true
default = false
}