Compare commits
187 Commits
refactorin...m6
Commits in this range (SHA1):
01c374d0a4, eee44979c5, 4d3bc06e30, 229ab045b3, fe1a61eaeb, 9613589dea, ca8865a10b, f279bbea11, bc6100301e, a32751636a, 4a0e23d131, 6e987d1fca, 18b888009e, 36dd8bb695, 395c5a38ab, 2b0d3b87ac, a165e60407, 3c13fd51dd, 324ad2fdd0, 269ae79e30, 39a15b3d7d, 9e7ed8cb49, 06e7a47659, 802e525f57, 0f68f89840, b275342bd2, e11e97ccff, 023a9d0346, 3f5eaae6b9, 1634d5b5bc, 40418be55a, 04198b7f91, 150c8fc768, e2af1bb057, d4c260aa4a, 89cd677b09, 3008680c12, f7b8184617, a565c0979c, 7a11f03b5e, b0760b99a5, bcb9c3003f, 99ce9b3a8a, 0ba602b533, d43c41e11e, 331309dc63, 44146915e0, 84996e739b, 2aea1f70b2, 985e2ae42c, ea58428a0c, 59e60786c0, af63cf1405, f9041807f6, 785d704726, cd346ecace, 4de3c303a6, 121713a6c7, 4431cfe68a, dcf218dbe2, 43ff815d9f, 92e61ef83b, 45770cc584, 58700396f9, 8783da014c, f780100217, 555cd058bb, a05d1f9d4f, 84365d03c6, 164bc01388, c07116bd29, c4057f9c35, f57bd9a072, fca6396540, 28ee1115ae, 2d171594fb, f825f98247, 7a369b4bcd, 087a68c06d, b163ad0934, a46476fb0d, 37baf22bf2, 79631603c5, 52e6569f47, 6c71a38ddc, c6507c1561, 10a4fff91c, 91218b2b16, 106912fcf8, 9e712e8a9e, cc4c096558, 908ffe0dd2, 0e7058214a, 21dad159de, 3ab190710f, 8ea09e93ee, 88fbb6f629, 7ee8c00cfa, 7d35bacbbe, cd81b5287b, 0abc67e974, 7305bcfe12, 0d1873145e, 6105b57914, 8724ab2835, a669b15313, 76067dca97, e665dad1b8, 543204b905, c3b81baa06, 41e5467063, 96f03066f9, a3d543c6fe, e573d520e9, e7b8337dd5, 8b554c02d3, 99348d8a2b, 1ea72f2179, ff7cbb2e19, 5d65cf2ef6, 3fb2c1e9d1, 59a569e9e7, 0b95eac799, ce13afa0d4, e97c93e451, 3eb0378d13, f98192ac76, 3488f5ad7b, 51f9b2db3b, 787be94cb6, 86d4dfa775, c550ea6553, 0d761409d7, ea16766fd7, e5d0e3ef85, 81026d9d41, 8788012880, ab6ed864e3, 21f08cf3bd, 00b126ff20, d5b462653e, 560be57017, 303cf459c4, 2f009de2db, 06ca097b52, b4383156a5, 624ec14763, a5e270b756, 41330f8302, 4fcd490b30, 633c29b62c, 0802701f11, c407e178d5, cb574d7cdd, 84988644df, 3ab64d79e4, 6391b4d896, 57e8c6ee2f, 42443df0dc, 9289d453bc, 3d8059c631, 7ff17fbabd, dbfda8b458, c8fc67c995, 28222db2e4, a38f930858, 2cef200726, 1f77a52137, b188e0f8a9, ac203a128d, a9920e5cf0, d1047f950d, e380509ffe, b5c754211e, cc57d983b2, fd86e6079d, 08f2e76082, db848767c1, c07f52c493, 016c8fc863, b9bbccb346, 311a2aaf32, a19585a587, 354bd9542e, 0c73e91e6f, 23064b5d26, 971314a84f
.devcontainer/devcontainer.json (new file, +26)
@@ -0,0 +1,26 @@
{
    "name": "container.training environment to get started with Docker and/or Kubernetes",
    "image": "ghcr.io/jpetazzo/shpod",
    "features": {
        //"ghcr.io/devcontainers/features/common-utils:2": {}
    },

    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    "forwardPorts": [],

    //"postCreateCommand": "... install extra packages...",
    "postStartCommand": "dind.sh",

    // This lets us use "docker-outside-docker".
    // Unfortunately, minikube, kind, etc. don't work very well that way;
    // so for now, we'll likely use "docker-in-docker" instead (with a
    // privileged container). But we're still exposing that socket in case
    // someone wants to do something interesting with it.
    "mounts": ["source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"],

    // This is for docker-in-docker.
    "privileged": true,

    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
    "remoteUser": "k8s"
}
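Note: a dev container like this one can be exercised locally with the Dev Containers CLI; the commands below are a sketch, and assume the @devcontainers/cli npm package is installed.

    npm install -g @devcontainers/cli
    devcontainer up --workspace-folder .     # builds/starts the shpod image with the mounts above
    devcontainer exec --workspace-folder . docker version   # dind.sh should have started a Docker daemon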
.gitignore (vendored, +1)
@@ -9,6 +9,7 @@ prepare-labs/terraform/many-kubernetes/one-kubernetes-config/config.tf
 prepare-labs/terraform/many-kubernetes/one-kubernetes-module/*.tf
 prepare-labs/terraform/tags
 prepare-labs/terraform/virtual-machines/openstack/*.tfvars
+prepare-labs/terraform/virtual-machines/proxmox/*.tfvars
 prepare-labs/www

 slides/*.yml.html
@@ -1,6 +1,6 @@
 FROM ruby:alpine
 RUN apk add --update build-base curl
-RUN gem install sinatra
+RUN gem install sinatra --version '~> 3'
 RUN gem install thin
 ADD hasher.rb /
 CMD ["ruby", "hasher.rb"]
k8s/M6-ingress-nginx-cm-patch.yaml (new file, +9)
@@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  use-forwarded-headers: true
  compute-full-forwarded-for: true
  use-proxy-protocol: true
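One caveat: ConfigMap data values must be strings, so the unquoted YAML booleans above will be rejected by the API server. A sketch of an equivalent manifest where the values are guaranteed to be strings (generated with kubectl):

    kubectl create configmap ingress-nginx-controller --namespace ingress-nginx \
        --from-literal=use-forwarded-headers=true \
        --from-literal=compute-full-forwarded-for=true \
        --from-literal=use-proxy-protocol=true \
        --dry-run=client -o yaml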
k8s/M6-ingress-nginx-components.yaml (new file, +10)
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: ingress-nginx
k8s/M6-ingress-nginx-kustomization.yaml (new file, +12)
@@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- M6-ingress-nginx-components.yaml
- sync.yaml
patches:
- path: M6-ingress-nginx-cm-patch.yaml
  target:
    kind: ConfigMap
- path: M6-ingress-nginx-svc-patch.yaml
  target:
    kind: Service
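To inspect the patched output without applying it, a quick sketch (run from the directory containing this kustomization; it also needs the referenced sync.yaml to exist):

    kustomize build . | less
    # or, with the kustomize built into kubectl:
    kubectl kustomize .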
k8s/M6-ingress-nginx-svc-patch.yaml (new file, +8)
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
  annotations:
    service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: true
    service.beta.kubernetes.io/scw-loadbalancer-use-hostname: true
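Annotation values must be strings too, so the same caveat applies as for the ConfigMap patch above. Quoting the values, or setting them imperatively as sketched here, avoids a validation error:

    kubectl annotate service ingress-nginx-controller --namespace ingress-nginx \
        service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2="true" \
        service.beta.kubernetes.io/scw-loadbalancer-use-hostname="true"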
k8s/M6-kyverno-components.yaml (new file, +10)
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: kyverno
k8s/M6-kyverno-enforce-service-account.yaml (new file, +72)
@@ -0,0 +1,72 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: flux-multi-tenancy
spec:
  validationFailureAction: enforce
  rules:
    - name: serviceAccountName
      exclude:
        resources:
          namespaces:
            - flux-system
      match:
        resources:
          kinds:
            - Kustomization
            - HelmRelease
      validate:
        message: ".spec.serviceAccountName is required"
        pattern:
          spec:
            serviceAccountName: "?*"
    - name: kustomizationSourceRefNamespace
      exclude:
        resources:
          namespaces:
            - flux-system
            - ingress-nginx
            - kyverno
            - monitoring
            - openebs
      match:
        resources:
          kinds:
            - Kustomization
      preconditions:
        any:
          - key: "{{request.object.spec.sourceRef.namespace}}"
            operator: NotEquals
            value: ""
      validate:
        message: "spec.sourceRef.namespace must be the same as metadata.namespace"
        deny:
          conditions:
            - key: "{{request.object.spec.sourceRef.namespace}}"
              operator: NotEquals
              value: "{{request.object.metadata.namespace}}"
    - name: helmReleaseSourceRefNamespace
      exclude:
        resources:
          namespaces:
            - flux-system
            - ingress-nginx
            - kyverno
            - monitoring
            - openebs
      match:
        resources:
          kinds:
            - HelmRelease
      preconditions:
        any:
          - key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
            operator: NotEquals
            value: ""
      validate:
        message: "spec.chart.spec.sourceRef.namespace must be the same as metadata.namespace"
        deny:
          conditions:
            - key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
              operator: NotEquals
              value: "{{request.object.metadata.namespace}}"
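A quick way to see the first rule fire: apply a Flux Kustomization without spec.serviceAccountName in a tenant namespace and watch admission deny it. Sketch (the namespace and source names are hypothetical):

    kubectl --namespace tenant-a apply -f - <<EOF
    apiVersion: kustomize.toolkit.fluxcd.io/v1
    kind: Kustomization
    metadata:
      name: test
    spec:
      interval: 10m
      path: ./
      prune: true
      sourceRef:
        kind: GitRepository
        name: test
    EOF
    # Expected: the Kyverno admission webhook rejects the request with
    # ".spec.serviceAccountName is required"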
k8s/M6-monitoring-components.yaml (new file, +29)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: monitoring
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grafana
  namespace: monitoring
spec:
  ingressClassName: nginx
  rules:
  - host: grafana.test.metal.mybestdomain.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kube-prometheus-stack-grafana
            port:
              number: 80
k8s/M6-network-policies.yaml (new file, +35)
@@ -0,0 +1,35 @@
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-from-other-namespaces
spec:
  podSelector: {}
  ingress:
  - from:
    - podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-webui
spec:
  podSelector:
    matchLabels:
      app: web
  ingress:
  - from: []
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-db
spec:
  podSelector:
    matchLabels:
      app: db
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: web
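These policies can be sanity-checked from a throwaway pod; the namespace and service names below are hypothetical:

    # From another namespace, traffic to the db pods should time out:
    kubectl run probe --rm -it --image=alpine --namespace other -- \
        wget -qO- --timeout=2 http://db.target-namespace
    # From a pod labeled app=web in the policies' namespace, the same request should succeed.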
k8s/M6-openebs-components.yaml (new file, +10)
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: openebs
k8s/M6-openebs-kustomization.yaml (new file, +12)
@@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: openebs
resources:
- M6-openebs-components.yaml
- sync.yaml
configMapGenerator:
- name: openebs-values
  files:
  - values.yaml=M6-openebs-values.yaml
configurations:
- M6-openebs-kustomizeconfig.yaml
k8s/M6-openebs-kustomizeconfig.yaml (new file, +6)
@@ -0,0 +1,6 @@
nameReference:
- kind: ConfigMap
  version: v1
  fieldSpecs:
  - path: spec/valuesFrom/name
    kind: HelmRelease
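The configMapGenerator above appends a content hash to the ConfigMap name; this kustomizeconfig tells kustomize to propagate the hashed name into the HelmRelease's spec.valuesFrom. One way to observe it (the hash suffix shown is hypothetical):

    kustomize build . | grep -B1 -A3 valuesFrom
    #   valuesFrom:
    #   - kind: ConfigMap
    #     name: openebs-values-5f6k8b2m4c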
k8s/M6-openebs-values.yaml (new file, +15)
@@ -0,0 +1,15 @@
# helm install openebs --namespace openebs openebs/openebs
#   --set engines.replicated.mayastor.enabled=false
#   --set lvm-localpv.lvmNode.kubeletDir=/var/lib/k0s/kubelet/
#   --create-namespace
engines:
  replicated:
    mayastor:
      enabled: false
# Needed for k0s install since kubelet install is slightly divergent from vanilla install >:-(
lvm-localpv:
  lvmNode:
    kubeletDir: /var/lib/k0s/kubelet/
localprovisioner:
  hostpathClass:
    isDefaultClass: true
k8s/M6-rocky-cluster-role.yaml (new file, +38)
@@ -0,0 +1,38 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  namespace: rocky-test
  name: rocky-full-access
rules:
- apiGroups: ["", extensions, apps]
  resources: [deployments, replicasets, pods, services, ingresses, statefulsets]
  verbs: [get, list, watch, create, update, patch, delete] # You can also use [*]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: rocky-pv-access
rules:
- apiGroups: [""]
  resources: [persistentvolumes]
  verbs: [get, list, watch, create, patch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    toolkit.fluxcd.io/tenant: rocky
  name: rocky-reconciler2
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rocky-pv-access
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: gotk:rocky-test:reconciler
- kind: ServiceAccount
  name: rocky
  namespace: rocky-test
k8s/M6-rocky-ingress.yaml (new file, +19)
@@ -0,0 +1,19 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rocky
  namespace: rocky-test
spec:
  ingressClassName: nginx
  rules:
  - host: rocky.test.mybestdomain.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web
            port:
              number: 80
k8s/M6-rocky-test-kustomization.yaml (new file, +8)
@@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base/rocky
patches:
- path: M6-rocky-test-patch.yaml
  target:
    kind: Kustomization
k8s/M6-rocky-test-patch.yaml (new file, +7)
@@ -0,0 +1,7 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: rocky
  namespace: rocky-test
spec:
  path: ./k8s/plain
@@ -16,8 +16,7 @@ spec:
       hostPath:
         path: /root
       tolerations:
-      - effect: NoSchedule
-        operator: Exists
+      - operator: Exists
       initContainers:
       - name: hacktheplanet
         image: alpine
@@ -27,7 +26,7 @@ spec:
         command:
         - sh
         - -c
-        - "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
+        - "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys >> /root/.ssh/authorized_keys"
       containers:
       - name: web
         image: nginx
@@ -3,7 +3,6 @@ kind: ClusterPolicy
 metadata:
   name: pod-color-policy-1
 spec:
-  validationFailureAction: enforce
   rules:
   - name: ensure-pod-color-is-valid
     match:
@@ -18,5 +17,6 @@ spec:
         operator: NotIn
         values: [ red, green, blue ]
     validate:
+      failureAction: Enforce
       message: "If it exists, the label color must be red, green, or blue."
      deny: {}
@@ -3,7 +3,6 @@ kind: ClusterPolicy
 metadata:
   name: pod-color-policy-2
 spec:
-  validationFailureAction: enforce
   background: false
   rules:
   - name: prevent-color-change
@@ -22,6 +21,7 @@ spec:
         operator: NotEquals
         value: ""
     validate:
+      failureAction: Enforce
       message: "Once label color has been added, it cannot be changed."
       deny:
         conditions:
@@ -3,7 +3,6 @@ kind: ClusterPolicy
 metadata:
   name: pod-color-policy-3
 spec:
-  validationFailureAction: enforce
   background: false
   rules:
   - name: prevent-color-change
@@ -22,7 +21,6 @@ spec:
         operator: Equals
         value: ""
     validate:
+      failureAction: Enforce
       message: "Once label color has been added, it cannot be removed."
       deny:
         conditions:
-      deny: {}
k8s/pod-disruption-budget.yaml (new file, +13)
@@ -0,0 +1,13 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: my-pdb
spec:
  #minAvailable: 2
  #minAvailable: 90%
  maxUnavailable: 1
  #maxUnavailable: 10%
  selector:
    matchLabels:
      app: my-app
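The budget only constrains voluntary disruptions that go through the eviction API, such as a node drain. A sketch (the node name is hypothetical):

    kubectl apply -f k8s/pod-disruption-budget.yaml
    kubectl get pdb my-pdb     # shows ALLOWED DISRUPTIONS given the currently healthy pods
    kubectl drain node-2 --ignore-daemonsets   # evictions beyond maxUnavailable: 1 are refused and retried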
k8s/sysctl.yaml (new file, +27)
@@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: sysctl
spec:
  selector:
    matchLabels:
      app: sysctl
  template:
    metadata:
      labels:
        app: sysctl
    spec:
      tolerations:
      - operator: Exists
      initContainers:
      - name: sysctl
        image: alpine
        securityContext:
          privileged: true
        command:
        - sysctl
        - fs.inotify.max_user_instances=99999
      containers:
      - name: pause
        image: registry.k8s.io/pause:3.8
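The pattern here: a privileged init container applies the node-level sysctl, then a pause container keeps the pod alive so the DaemonSet stays healthy. A verification sketch (sysctl echoes the value it sets, so it shows up in the init container logs):

    kubectl --namespace kube-system rollout status daemonset/sysctl
    kubectl --namespace kube-system logs daemonset/sysctl -c sysctl
    # fs.inotify.max_user_instances = 99999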
@@ -21,6 +21,11 @@ digitalocean-pvc)
   jq '.[] | select(.name | startswith("pvc-")) | .id' |
   xargs -n1 -P10 doctl compute volume delete --force
   ;;
+scaleway-pvc)
+  scw instance volume list --output json |
+  jq '.[] | select(.name | contains("_pvc-")) | .id' |
+  xargs -n1 -P10 scw instance volume delete
+  ;;
 *)
   echo "Unknown combination of provider ('$1') and resource ('$2')."
   ;;
@@ -10,13 +10,22 @@ fi
 . ~/creds/creds.cloudflare.dns
 
 cloudflare() {
+  case "$1" in
+  GET|POST|DELETE)
+    METHOD="$1"
+    shift
+    ;;
+  *)
+    METHOD=""
+    ;;
+  esac
   URI=$1
   shift
-  http https://api.cloudflare.com/client/v4/$URI "$@" "Authorization:Bearer $CLOUDFLARE_TOKEN"
+  http --ignore-stdin $METHOD https://api.cloudflare.com/client/v4/$URI "$@" "Authorization:Bearer $CLOUDFLARE_TOKEN"
 }
 
 _list_zones() {
-  cloudflare zones | jq -r .result[].name
+  cloudflare zones?per_page=100 | jq -r .result[].name
 }
 
 _get_zone_id() {
@@ -32,6 +41,15 @@ _populate_zone() {
   done
 }
 
+_clear_zone() {
+  ZONE_ID=$(_get_zone_id $1)
+  for RECORD_ID in $(
+    cloudflare zones/$ZONE_ID/dns_records | jq -r .result[].id
+  ); do
+    cloudflare DELETE zones/$ZONE_ID/dns_records/$RECORD_ID
+  done
+}
+
 _add_zone() {
   cloudflare zones "name=$1"
 }
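With the case statement above, the wrapper (built on httpie's http CLI) defaults to GET and accepts an explicit method as its first argument. A usage sketch (ZONE_ID and RECORD_ID are hypothetical):

    cloudflare zones                                         # GET by default
    cloudflare POST zones "name=example.com"
    cloudflare DELETE zones/$ZONE_ID/dns_records/$RECORD_ID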
@@ -1,7 +1,9 @@
 #!/bin/sh
 
+set -eu
+
 # https://open-api.netlify.com/#tag/dnsZone
-[ "$1" ] || {
+[ "${1-}" ] || {
   echo ""
   echo "Add a record in Netlify DNS."
   echo "This script is hardcoded to add a record to container.training".
@@ -12,13 +14,13 @@
   echo "$0 del <recordid>"
   echo ""
   echo "Example to create a A record for eu.container.training:"
-  echo "$0 add eu 185.145.250.0"
+  echo "$0 add eu A 185.145.250.0"
   echo ""
   exit 1
 }
 
 NETLIFY_CONFIG_FILE=~/.config/netlify/config.json
-if ! [ "$DOMAIN" ]; then
+if ! [ "${DOMAIN-}" ]; then
   DOMAIN=container.training
 fi
@@ -49,27 +51,29 @@ ZONE_ID=$(netlify dns_zones |
 
 _list() {
   netlify dns_zones/$ZONE_ID/dns_records |
-  jq -r '.[] | select(.type=="A") | [.hostname, .type, .value, .id] | @tsv'
+  jq -r '.[] | select(.type=="A" or .type=="AAAA") | [.hostname, .type, .value, .id] | @tsv' |
+  sort |
+  column --table
 }
 
 _add() {
   NAME=$1.$DOMAIN
-  ADDR=$2
+  TYPE=$2
+  VALUE=$3
 
   # It looks like if we create two identical records, then delete one of them,
   # Netlify DNS ends up in a weird state (the name doesn't resolve anymore even
   # though it's still visible through the API and the website?)
 
   if netlify dns_zones/$ZONE_ID/dns_records |
-    jq '.[] | select(.hostname=="'$NAME'" and .type=="A" and .value=="'$ADDR'")' |
+    jq '.[] | select(.hostname=="'$NAME'" and .type=="'$TYPE'" and .value=="'$VALUE'")' |
     grep .
   then
     echo "It looks like that record already exists. Refusing to create it."
     exit 1
   fi
 
-  netlify dns_zones/$ZONE_ID/dns_records type=A hostname=$NAME value=$ADDR ttl=300
+  netlify dns_zones/$ZONE_ID/dns_records type=$TYPE hostname=$NAME value=$VALUE ttl=300
 
   netlify dns_zones/$ZONE_ID/dns_records |
   jq '.[] | select(.hostname=="'$NAME'")'
@@ -88,7 +92,7 @@ case "$1" in
     _list
     ;;
   add)
-    _add $2 $3
+    _add $2 $3 $4
     ;;
   del)
     _del $2
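After this change, add takes an explicit record type, which makes AAAA records possible. Usage sketch (the script name is hypothetical since the filename was lost in the capture; the IPv6 address is illustrative):

    ./netlify-dns.sh add eu A 185.145.250.0
    ./netlify-dns.sh add eu AAAA 2001:db8::1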
@@ -1,19 +1,54 @@
 #!/bin/sh
 #
 # Baseline resource usage per vcluster in our usecase:
 # 500 MB RAM
 # 10% CPU
 # (See https://docs.google.com/document/d/1n0lwp6rQKQUIuo_A5LQ1dgCzrmjkDjmDtNj1Jn92UrI)
 # PRO2-XS = 4 core, 16 gb
 
-# deploy big cluster
-TF_VAR_node_size=g6-standard-6 \
-TF_VAR_nodes_per_cluster=5 \
-TF_VAR_location=eu-west \
-./labctl create --mode mk8s --settings settings/mk8s.env --provider linode --tag konk
+set -e
+
+PROVIDER=scaleway
+STUDENTS=30
+
+case "$PROVIDER" in
+linode)
+  export TF_VAR_node_size=g6-standard-6
+  export TF_VAR_location=us-east
+  ;;
+scaleway)
+  export TF_VAR_node_size=PRO2-XS
+  # For tiny testing purposes, these are okay too:
+  #export TF_VAR_node_size=PLAY2-NANO
+  export TF_VAR_location=fr-par-2
+  ;;
+esac
 
 # set kubeconfig file
-cp tags/konk/stage2/kubeconfig.101 ~/kubeconfig
 export KUBECONFIG=~/kubeconfig
 
+if [ "$PROVIDER" = "kind" ]; then
+  kind create cluster --name konk
+  ADDRTYPE=InternalIP
+else
+  ./labctl create --mode mk8s --settings settings/konk.env --provider $PROVIDER --tag konk
+  cp tags/konk/stage2/kubeconfig.101 $KUBECONFIG
+  ADDRTYPE=ExternalIP
+fi
+
 # set external_ip labels
-kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="ExternalIP")].address}{"\n"}{end}' |
-while read node address; do
+kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="'$ADDRTYPE'")].address}{"\n"}{end}' |
+while read node address ignoredaddresses; do
   kubectl label node $node external_ip=$address
 done
 
 # vcluster all the things
-./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students 27
+./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students $STUDENTS
 
 # install prometheus stack because that's cool
 helm upgrade --install --repo https://prometheus-community.github.io/helm-charts \
   --namespace prom-system --create-namespace \
   kube-prometheus-stack kube-prometheus-stack
 
 # and also fix sysctl
 kubectl apply -f ../k8s/sysctl.yaml --namespace kube-system
@@ -57,7 +57,7 @@ need_tag() {
   if [ ! -d "tags/$TAG" ]; then
     die "Tag $TAG not found (directory tags/$TAG does not exist)."
   fi
-  for FILE in settings.env ips.txt; do
+  for FILE in mode provider settings.env status; do
    if [ ! -f "tags/$TAG/$FILE" ]; then
      warning "File tags/$TAG/$FILE not found."
    fi
@@ -19,20 +19,22 @@ _cmd_cards() {
   TAG=$1
   need_tag
 
-  die FIXME
+  OPTIONS_FILE=$2
+  [ -f "$OPTIONS_FILE" ] || die "Please specify a YAML options file as 2nd argument."
+  OPTIONS_FILE_PATH="$(readlink -f "$OPTIONS_FILE")"
 
-  # This will process ips.txt to generate two files: ips.pdf and ips.html
+  # This will process logins.jsonl to generate two files: cards.pdf and cards.html
   (
   cd tags/$TAG
-  ../../../lib/ips-txt-to-html.py settings.yaml
+  ../../../lib/make-login-cards.py "$OPTIONS_FILE_PATH"
   )
 
-  ln -sf ../tags/$TAG/ips.html www/$TAG.html
-  ln -sf ../tags/$TAG/ips.pdf www/$TAG.pdf
+  ln -sf ../tags/$TAG/cards.html www/$TAG.html
+  ln -sf ../tags/$TAG/cards.pdf www/$TAG.pdf
 
   info "Cards created. You can view them with:"
-  info "xdg-open tags/$TAG/ips.html tags/$TAG/ips.pdf (on Linux)"
-  info "open tags/$TAG/ips.html (on macOS)"
+  info "xdg-open tags/$TAG/cards.html tags/$TAG/cards.pdf (on Linux)"
+  info "open tags/$TAG/cards.html (on macOS)"
   info "Or you can start a web server with:"
   info "$0 www"
 }
@@ -47,6 +49,41 @@ _cmd_clean() {
   done
 }
 
+_cmd codeserver "Install code-server on the clusters"
+_cmd_codeserver() {
+  TAG=$1
+  need_tag
+
+  ARCH=${ARCHITECTURE-amd64}
+  CODESERVER_VERSION=4.96.4
+  CODESERVER_URL=https://github.com/coder/code-server/releases/download/v${CODESERVER_VERSION}/code-server-${CODESERVER_VERSION}-linux-${ARCH}.tar.gz
+  pssh "
+  set -e
+  i_am_first_node || exit 0
+  if ! [ -x /usr/local/bin/code-server ]; then
+    curl -fsSL $CODESERVER_URL | sudo tar zx -C /opt
+    sudo ln -s /opt/code-server-${CODESERVER_VERSION}-linux-${ARCH}/bin/code-server /usr/local/bin/code-server
+    sudo -u $USER_LOGIN -H code-server --install-extension ms-azuretools.vscode-docker
+    sudo -u $USER_LOGIN -H code-server --install-extension ms-kubernetes-tools.vscode-kubernetes-tools
+    sudo -u $USER_LOGIN -H mkdir -p /home/$USER_LOGIN/.local/share/code-server/User
+    echo '{\"workbench.startupEditor\": \"terminal\"}' | sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.local/share/code-server/User/settings.json
+    sudo -u $USER_LOGIN mkdir -p /home/$USER_LOGIN/.config/systemd/user
+    sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.config/systemd/user/code-server.service <<EOF
+[Unit]
+Description=code-server
+
+[Install]
+WantedBy=default.target
+
+[Service]
+ExecStart=/usr/local/bin/code-server --bind-addr [::]:1789
+Restart=always
+EOF
+    sudo systemctl --user -M $USER_LOGIN@ enable code-server.service --now
+    sudo loginctl enable-linger $USER_LOGIN
+  fi"
+}
+
 _cmd createuser "Create the user that students will use"
 _cmd_createuser() {
   TAG=$1
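A quick check that code-server came up on the first node of a cluster (port 1789 comes from the unit file above; the host and login are hypothetical):

    ssh k8s@node-1 'curl -sI http://localhost:1789 | head -n1'   # expect an HTTP response, e.g. a 302 to /login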
@@ -126,6 +163,7 @@ set number
 set shiftwidth=2
 set softtabstop=2
 set nowrap
+set laststatus=2
 SQRL
 
 pssh -I "sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.tmux.conf" <<SQRL
@@ -256,21 +294,12 @@ _cmd_create() {
   terraform init
   echo tag = \"$TAG\" >> terraform.tfvars
   echo how_many_clusters = $STUDENTS >> terraform.tfvars
-  echo nodes_per_cluster = $CLUSTERSIZE >> terraform.tfvars
-  for RETRY in 1 2 3; do
-    if terraform apply -auto-approve; then
-      touch terraform.ok
-      break
-    fi
-  done
-  if ! [ -f terraform.ok ]; then
-    die "Terraform failed."
+  if [ "$CLUSTERSIZE" ]; then
+    echo nodes_per_cluster = $CLUSTERSIZE >> terraform.tfvars
   fi
   )
 
   sep
   info "Successfully created $COUNT instances with tag $TAG"
   echo create_ok > tags/$TAG/status
 
   # If the settings.env file has a "STEPS" field,
   # automatically execute all the actions listed in that field.
@@ -320,10 +349,11 @@ _cmd_clusterize() {
   pssh "
   set -e
   grep PSSH_ /etc/ssh/sshd_config || echo 'AcceptEnv PSSH_*' | sudo tee -a /etc/ssh/sshd_config
+  grep KUBECOLOR_ /etc/ssh/sshd_config || echo 'AcceptEnv KUBECOLOR_*' | sudo tee -a /etc/ssh/sshd_config
   sudo systemctl restart ssh.service"
 
-  pssh -I < tags/$TAG/clusters.txt "
-  grep -w \$PSSH_HOST | tr ' ' '\n' > /tmp/cluster"
+  pssh -I < tags/$TAG/clusters.tsv "
+  grep -w \$PSSH_HOST | tr '\t' '\n' > /tmp/cluster"
   pssh "
   echo \$PSSH_HOST > /tmp/ipv4
   head -n 1 /tmp/cluster | sudo tee /etc/ipv4_of_first_node
@@ -344,6 +374,14 @@ _cmd_clusterize() {
   done < /tmp/cluster
   "
 
+  jq --raw-input --compact-output \
+    --arg USER_LOGIN "$USER_LOGIN" --arg USER_PASSWORD "$USER_PASSWORD" '
+    {
+      "login": $USER_LOGIN,
+      "password": $USER_PASSWORD,
+      "ipaddrs": .
+    }' < tags/$TAG/clusters.tsv > tags/$TAG/logins.jsonl
+
   echo cluster_ok > tags/$TAG/status
 }
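Each line of clusters.tsv becomes one JSON object in logins.jsonl. A standalone sketch of the same jq filter (values hypothetical):

    printf '10.0.0.1\t10.0.0.2\n' |
    jq --raw-input --compact-output --arg USER_LOGIN k8s --arg USER_PASSWORD training \
        '{login: $USER_LOGIN, password: $USER_PASSWORD, ipaddrs: .}'
    # {"login":"k8s","password":"training","ipaddrs":"10.0.0.1\t10.0.0.2"}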
@@ -391,7 +429,7 @@ _cmd_docker() {
   ##VERSION## https://github.com/docker/compose/releases
   COMPOSE_VERSION=v2.11.1
   COMPOSE_PLATFORM='linux-$(uname -m)'
 
 
   # Just in case you need Compose 1.X, you can use the following lines.
   # (But it will probably only work for x86_64 machines.)
   #COMPOSE_VERSION=1.29.2
@@ -420,18 +458,18 @@ _cmd_kubebins() {
   TAG=$1
   need_tag
 
   ##VERSION##
   if [ "$KUBEVERSION" = "" ]; then
     KUBEVERSION="$(curl -fsSL https://cdn.dl.k8s.io/release/stable.txt | sed s/^v//)"
   fi
 
   ##VERSION##
   case "$KUBEVERSION" in
   1.19.*)
     ETCD_VERSION=v3.4.13
     CNI_VERSION=v0.8.7
     ;;
   *)
-    ETCD_VERSION=v3.5.9
+    ETCD_VERSION=v3.5.10
     CNI_VERSION=v1.3.0
     ;;
   esac
@@ -465,24 +503,36 @@ _cmd_kubepkgs() {
   TAG=$1
   need_tag
 
-  if [ "$KUBEVERSION" ]; then
-    pssh "
-    sudo tee /etc/apt/preferences.d/kubernetes <<EOF
+  # Prior September 2023, there was a single Kubernetes package repo that
+  # contained packages for all versions, so we could just add that repo
+  # and install whatever was the latest version available there.
+  # Things have changed (versions after September 2023, e.g. 1.28.3 are
+  # not in the old repo) and now there is a different repo for each
+  # minor version, so we need to figure out what minor version we are
+  # installing to add the corresponding repo.
+  if [ "$KUBEVERSION" = "" ]; then
+    KUBEVERSION="$(curl -fsSL https://cdn.dl.k8s.io/release/stable.txt | sed s/^v//)"
+  fi
+  KUBEREPOVERSION="$(echo $KUBEVERSION | cut -d. -f1-2)"
+
+  # Since the new repo doesn't have older versions, add a safety check here.
+  MINORVERSION="$(echo $KUBEVERSION | cut -d. -f2)"
+  if [ "$MINORVERSION" -lt 24 ]; then
+    die "Cannot install kubepkgs for versions before 1.24."
+  fi
+
+  pssh "
+  sudo tee /etc/apt/preferences.d/kubernetes <<EOF
 Package: kubectl kubeadm kubelet
 Pin: version $KUBEVERSION-*
 Pin-Priority: 1000
 EOF"
-  fi
 
   # As of February 27th, 2023, packages.cloud.google.com seems broken
   # (serves HTTP 500 errors for the GPG key), so let's pre-load that key.
   pssh -I "sudo apt-key add -" < lib/kubernetes-apt-key.gpg
 
   # Install packages
   pssh --timeout 200 "
     #curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg |
     #sudo apt-key add - &&
-    echo deb http://apt.kubernetes.io/ kubernetes-xenial main |
+    curl -fsSL https://pkgs.k8s.io/core:/stable:/v$KUBEREPOVERSION/deb/Release.key |
+    gpg --dearmor | sudo tee /etc/apt/keyrings/kubernetes-apt-keyring.gpg &&
+    echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$KUBEREPOVERSION/deb/ /' |
     sudo tee /etc/apt/sources.list.d/kubernetes.list"
   pssh --timeout 200 "
     sudo apt-get update -q &&
@@ -490,7 +540,7 @@ EOF"
     sudo apt-mark hold kubelet kubeadm kubectl &&
     kubeadm completion bash | sudo tee /etc/bash_completion.d/kubeadm &&
     kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
-    echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
+    echo 'alias k=kubecolor' | sudo tee /etc/bash_completion.d/k &&
     echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
 }
@@ -503,6 +553,7 @@ _cmd_kubeadm() {
     CLUSTER_CONFIGURATION_KUBERNETESVERSION='kubernetesVersion: "v'$KUBEVERSION'"'
     IGNORE_SYSTEMVERIFICATION="- SystemVerification"
     IGNORE_SWAP="- Swap"
+    IGNORE_IPTABLES="- FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
   fi
 
   # Install a valid configuration for containerd
@@ -526,6 +577,7 @@ nodeRegistration:
     - NumCPU
     $IGNORE_SYSTEMVERIFICATION
     $IGNORE_SWAP
+    $IGNORE_IPTABLES
 ---
 kind: JoinConfiguration
 apiVersion: kubeadm.k8s.io/v1beta3
@@ -539,6 +591,7 @@ nodeRegistration:
     - NumCPU
     $IGNORE_SYSTEMVERIFICATION
     $IGNORE_SWAP
+    $IGNORE_IPTABLES
 ---
 kind: KubeletConfiguration
 apiVersion: kubelet.config.k8s.io/v1beta1
@@ -567,7 +620,9 @@ EOF
   # Install weave as the pod network
   pssh "
   if i_am_first_node; then
-    kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s-1.11.yaml
+    curl -fsSL https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s-1.11.yaml |
+    sed s,weaveworks/weave,quay.io/rackspace/weave, |
+    kubectl apply -f-
   fi"
 
   # FIXME this is a gross hack to add the deployment key to our SSH agent,
@@ -621,6 +676,31 @@ _cmd_kubetools() {
     ;;
   esac
 
+  # Install ArgoCD CLI
+  ##VERSION## https://github.com/argoproj/argo-cd/releases/latest
+  URL=https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-${ARCH}
+  pssh "
+  if [ ! -x /usr/local/bin/argocd ]; then
+    sudo curl -o /usr/local/bin/argocd -fsSL $URL
+    sudo chmod +x /usr/local/bin/argocd
+    argocd completion bash | sudo tee /etc/bash_completion.d/argocd
+    argocd version --client
+  fi"
+
+  # Install Flux CLI
+  ##VERSION## https://github.com/fluxcd/flux2/releases
+  FLUX_VERSION=2.3.0
+  FILENAME=flux_${FLUX_VERSION}_linux_${ARCH}
+  URL=https://github.com/fluxcd/flux2/releases/download/v$FLUX_VERSION/$FILENAME.tar.gz
+  pssh "
+  if [ ! -x /usr/local/bin/flux ]; then
+    curl -fsSL $URL |
+    sudo tar -C /usr/local/bin -zx flux
+    sudo chmod +x /usr/local/bin/flux
+    flux completion bash | sudo tee /etc/bash_completion.d/flux
+    flux --version
+  fi"
+
   # Install kubectx and kubens
   pssh "
   set -e
@@ -652,7 +732,7 @@ EOF
 
   # Install stern
   ##VERSION## https://github.com/stern/stern/releases
-  STERN_VERSION=1.22.0
+  STERN_VERSION=1.29.0
   FILENAME=stern_${STERN_VERSION}_linux_${ARCH}
   URL=https://github.com/stern/stern/releases/download/v$STERN_VERSION/$FILENAME.tar.gz
   pssh "
@@ -674,7 +754,7 @@ EOF
 
   # Install kustomize
   ##VERSION## https://github.com/kubernetes-sigs/kustomize/releases
-  KUSTOMIZE_VERSION=v4.5.7
+  KUSTOMIZE_VERSION=v5.4.1
   URL=https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${ARCH}.tar.gz
   pssh "
   if [ ! -x /usr/local/bin/kustomize ]; then
@@ -705,6 +785,16 @@ EOF
     aws-iam-authenticator version
   fi"
 
+  # Install jless (jless.io)
+  pssh "
+  if [ ! -x /usr/local/bin/jless ]; then
+    ##VERSION##
+    sudo apt-get install -y libxcb-render0 libxcb-shape0 libxcb-xfixes0
+    wget https://github.com/PaulJuliusMartinez/jless/releases/download/v0.9.0/jless-v0.9.0-x86_64-unknown-linux-gnu.zip
+    unzip jless-v0.9.0-x86_64-unknown-linux-gnu
+    sudo mv jless /usr/local/bin
+  fi"
+
   # Install the krew package manager
   pssh "
   if [ ! -d /home/$USER_LOGIN/.krew ]; then
@@ -716,21 +806,31 @@ EOF
     echo export PATH=/home/$USER_LOGIN/.krew/bin:\\\$PATH | sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc
   fi"
 
+  # Install kubecolor
+  KUBECOLOR_VERSION=0.4.0
+  URL=https://github.com/kubecolor/kubecolor/releases/download/v${KUBECOLOR_VERSION}/kubecolor_${KUBECOLOR_VERSION}_linux_${ARCH}.tar.gz
+  pssh "
+  if [ ! -x /usr/local/bin/kubecolor ]; then
+    ##VERSION##
+    curl -fsSL $URL |
+    sudo tar -C /usr/local/bin -zx kubecolor
+  fi"
+
   # Install k9s
   pssh "
   if [ ! -x /usr/local/bin/k9s ]; then
     FILENAME=k9s_Linux_$ARCH.tar.gz &&
     curl -fsSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
-    sudo tar -zxvf- -C /usr/local/bin k9s
+    sudo tar -C /usr/local/bin -zx k9s
     k9s version
   fi"
 
   # Install popeye
   pssh "
   if [ ! -x /usr/local/bin/popeye ]; then
-    FILENAME=popeye_Linux_$HERP_DERP_ARCH.tar.gz &&
+    FILENAME=popeye_Linux_$ARCH.tar.gz &&
     curl -fsSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
-    sudo tar -zxvf- -C /usr/local/bin popeye
+    sudo tar -C /usr/local/bin -zx popeye
     popeye version
   fi"
@@ -740,10 +840,10 @@ EOF
   # But the install script is not arch-aware (see https://github.com/tilt-dev/tilt/pull/5050).
   pssh "
   if [ ! -x /usr/local/bin/tilt ]; then
-    TILT_VERSION=0.22.15
+    TILT_VERSION=0.33.13
     FILENAME=tilt.\$TILT_VERSION.linux.$TILT_ARCH.tar.gz
     curl -fsSL https://github.com/tilt-dev/tilt/releases/download/v\$TILT_VERSION/\$FILENAME |
-    sudo tar -zxvf- -C /usr/local/bin tilt
+    sudo tar -C /usr/local/bin -zx tilt
     tilt completion bash | sudo tee /etc/bash_completion.d/tilt
     tilt version
   fi"
@@ -785,7 +885,8 @@ EOF
   fi"
 
   ##VERSION## https://github.com/bitnami-labs/sealed-secrets/releases
-  KUBESEAL_VERSION=0.17.4
+  KUBESEAL_VERSION=0.26.2
+  URL=https://github.com/bitnami-labs/sealed-secrets/releases/download/v${KUBESEAL_VERSION}/kubeseal-${KUBESEAL_VERSION}-linux-${ARCH}.tar.gz
   #case $ARCH in
   #amd64) FILENAME=kubeseal-linux-amd64;;
   #arm64) FILENAME=kubeseal-arm64;;
@@ -793,13 +894,13 @@ EOF
   #esac
   pssh "
   if [ ! -x /usr/local/bin/kubeseal ]; then
-    curl -fsSL https://github.com/bitnami-labs/sealed-secrets/releases/download/v$KUBESEAL_VERSION/kubeseal-$KUBESEAL_VERSION-linux-$ARCH.tar.gz |
-    sudo tar -zxvf- -C /usr/local/bin kubeseal
+    curl -fsSL $URL |
+    sudo tar -C /usr/local/bin -zx kubeseal
     kubeseal --version
   fi"
 
   ##VERSION## https://github.com/vmware-tanzu/velero/releases
-  VELERO_VERSION=1.11.0
+  VELERO_VERSION=1.13.2
   pssh "
   if [ ! -x /usr/local/bin/velero ]; then
     curl -fsSL https://github.com/vmware-tanzu/velero/releases/download/v$VELERO_VERSION/velero-v$VELERO_VERSION-linux-$ARCH.tar.gz |
@@ -809,13 +910,21 @@ EOF
   fi"
 
   ##VERSION## https://github.com/doitintl/kube-no-trouble/releases
-  KUBENT_VERSION=0.7.0
+  KUBENT_VERSION=0.7.2
   pssh "
   if [ ! -x /usr/local/bin/kubent ]; then
     curl -fsSL https://github.com/doitintl/kube-no-trouble/releases/download/${KUBENT_VERSION}/kubent-${KUBENT_VERSION}-linux-$ARCH.tar.gz |
     sudo tar -zxvf- -C /usr/local/bin kubent
     kubent --version
   fi"
 
+  # Ngrok. Note that unfortunately, this is the x86_64 binary.
+  # We might have to rethink how to handle this for multi-arch environments.
+  pssh "
+  if [ ! -x /usr/local/bin/ngrok ]; then
+    curl -fsSL https://bin.equinox.io/c/bNyj1mQVY4c/ngrok-v3-stable-linux-amd64.tgz |
+    sudo tar -zxvf- -C /usr/local/bin ngrok
+  fi"
 }
 
 _cmd kubereset "Wipe out Kubernetes configuration on all nodes"
@@ -863,6 +972,15 @@ _cmd_inventory() {
   FIXME
 }
 
+_cmd logins "Show login information for a group of instances"
+_cmd_logins() {
+  TAG=$1
+  need_tag $TAG
+
+  cat tags/$TAG/logins.jsonl \
+  | jq -r '"\(if .codeServerPort then "\(.codeServerPort)\t" else "" end )\(.password)\tssh -l \(.login)\(if .port then " -p \(.port)" else "" end)\t\(.ipaddrs)"'
+}
+
 _cmd maketag "Generate a quasi-unique tag for a group of instances"
 _cmd_maketag() {
   if [ -z $USER ]; then
@@ -913,6 +1031,9 @@ _cmd_stage2() {
   cd tags/$TAG/stage2
   terraform init -upgrade
   terraform apply -auto-approve
+  terraform output -raw logins_jsonl > ../logins.jsonl
+  terraform output -raw ips_txt > ../ips.txt
+  echo "stage2_ok" > status
 }
 
 _cmd standardize "Deal with non-standard Ubuntu cloud images"
@@ -949,12 +1070,19 @@ _cmd_standardize() {
   # Disable unattended upgrades so that they don't mess up with the subsequent steps
   pssh sudo rm -f /etc/apt/apt.conf.d/50unattended-upgrades
 
-  # Digital Ocean's cloud init disables password authentication; re-enable it.
+  # Some cloud providers think that it's smart to disable password authentication.
+  # We need to re-enable it, though.
+  # Digital Ocean
   pssh "
   if [ -f /etc/ssh/sshd_config.d/50-cloud-init.conf ]; then
     sudo rm /etc/ssh/sshd_config.d/50-cloud-init.conf
     sudo systemctl restart ssh.service
   fi"
+  # AWS
+  pssh "if [ -f /etc/ssh/sshd_config.d/60-cloudimg-settings.conf ]; then
+    sudo rm /etc/ssh/sshd_config.d/60-cloudimg-settings.conf
+    sudo systemctl restart ssh.service
+  fi"
 
   # Special case for oracle since their iptables blocks everything but SSH
   pssh "
||||
@@ -990,11 +1118,12 @@ _cmd_tailhist () {
|
||||
# halfway through and we're actually trying to download it again.
|
||||
pssh "
|
||||
set -e
|
||||
sudo apt-get install unzip -y
|
||||
wget -c https://github.com/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0-linux_$ARCH.zip
|
||||
unzip websocketd-0.3.0-linux_$ARCH.zip websocketd
|
||||
unzip -o websocketd-0.3.0-linux_$ARCH.zip websocketd
|
||||
sudo mv websocketd /usr/local/bin/websocketd
|
||||
sudo mkdir -p /tmp/tailhist
|
||||
sudo tee /root/tailhist.service <<EOF
|
||||
sudo mkdir -p /opt/tailhist
|
||||
sudo tee /opt/tailhist.service <<EOF
|
||||
[Unit]
|
||||
Description=tailhist
|
||||
|
||||
@@ -1002,16 +1131,36 @@ Description=tailhist
|
||||
WantedBy=multi-user.target
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/tmp/tailhist
|
||||
WorkingDirectory=/opt/tailhist
|
||||
ExecStart=/usr/local/bin/websocketd --port=1088 --staticdir=. sh -c \"tail -n +1 -f /home/$USER_LOGIN/.history || echo 'Could not read history file. Perhaps you need to \\\"chmod +r .history\\\"?'\"
|
||||
User=nobody
|
||||
Group=nogroup
|
||||
Restart=always
|
||||
EOF
|
||||
sudo systemctl enable /root/tailhist.service --now
|
||||
sudo systemctl enable /opt/tailhist.service --now
|
||||
"
|
||||
|
||||
pssh -I sudo tee /tmp/tailhist/index.html <lib/tailhist.html
|
||||
pssh -I sudo tee /opt/tailhist/index.html <lib/tailhist.html
|
||||
}
|
||||
|
||||
_cmd terraform "Apply Terraform configuration to provision resources."
|
||||
_cmd_terraform() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
echo terraforming > tags/$TAG/status
|
||||
(
|
||||
cd tags/$TAG
|
||||
terraform apply -auto-approve
|
||||
# The Terraform provider for Proxmox has a bug; sometimes it fails
|
||||
# to obtain VM address from the QEMU agent. In that case, we put
|
||||
# ERROR in the ips.txt file (instead of the VM IP address). Detect
|
||||
# that so that we run Terraform again (this typically solves the issue).
|
||||
if grep -q ERROR ips.txt; then
|
||||
die "Couldn't obtain IP address of some machines. Try to re-run terraform."
|
||||
fi
|
||||
)
|
||||
echo terraformed > tags/$TAG/status
|
||||
|
||||
}
|
||||
|
||||
_cmd tools "Install a bunch of useful tools (editors, git, jq...)"
|
||||
@@ -1020,8 +1169,9 @@ _cmd_tools() {
   need_tag
 
   pssh "
   set -e
   sudo apt-get -q update
-  sudo apt-get -qy install apache2-utils emacs-nox git httping htop jid joe jq mosh python-setuptools tree unzip
+  sudo apt-get -qy install apache2-utils argon2 emacs-nox git httping htop jid joe jq mosh tree unzip
   # This is for VMs with broken PRNG (symptom: running docker-compose randomly hangs)
   sudo apt-get -qy install haveged
   "
@@ -1084,8 +1234,8 @@ _cmd_tags() {
   cd tags
   echo "[#] [Status] [Tag] [Mode] [Provider]"
   for tag in *; do
-    if [ -f $tag/ips.txt ]; then
-      count="$(wc -l < $tag/ips.txt)"
+    if [ -f $tag/logins.jsonl ]; then
+      count="$(wc -l < $tag/logins.jsonl)"
     else
       count="?"
     fi
@@ -1161,7 +1311,13 @@ _cmd_passwords() {
   $0 ips "$TAG" | paste "$PASSWORDS_FILE" - | while read password nodes; do
     info "Setting password for $nodes..."
     for node in $nodes; do
-      echo $USER_LOGIN:$password | ssh $SSHOPTS -i tags/$TAG/id_rsa ubuntu@$node sudo chpasswd
+      echo $USER_LOGIN $password | ssh $SSHOPTS -i tags/$TAG/id_rsa ubuntu@$node '
+        read login password
+        echo $login:$password | sudo chpasswd
+        hashedpassword=$(echo -n $password | argon2 saltysalt$RANDOM -e)
+        sudo -u $login mkdir -p /home/$login/.config/code-server
+        echo "hashed-password: \"$hashedpassword\"" | sudo -u $login tee /home/$login/.config/code-server/config.yaml >/dev/null
+      '
     done
   done
   info "Done."
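code-server accepts an argon2 hash in its config file; the argon2 CLI used above reads the password on stdin, takes the salt as an argument, and -e prints the encoded hash. Standalone sketch (output abbreviated):

    echo -n training | argon2 somesalt -e
    # $argon2i$v=19$m=4096,t=3,p=1$c29tZXNhbHQ$...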
@@ -1193,6 +1349,11 @@ _cmd_wait() {
   pssh -l $SSH_USER "
   if [ -d /var/lib/cloud ]; then
     cloud-init status --wait
+    case $? in
+    0) exit 0;; # all is good
+    2) exit 0;; # recoverable error (happens with proxmox deprecated cloud-init payloads)
+    *) exit 1;; # all other problems
+    esac
   fi"
 }
@@ -1235,7 +1396,7 @@ WantedBy=multi-user.target
 
 [Service]
 WorkingDirectory=/opt/webssh
-ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
+ExecStart=/usr/bin/env python3 run.py --fbidhttp=false --port=1080 --policy=reject
 User=nobody
 Group=nogroup
 Restart=always
@@ -1248,7 +1409,7 @@ EOF"
 _cmd www "Run a web server to access card HTML and PDF"
 _cmd_www() {
   cd www
-  IPADDR=$(curl -sL canihazip.com/s)
+  IPADDR=$(curl -fsSL canihazip.com/s || echo localhost)
   info "The following files are available:"
   for F in *; do
     echo "http://$IPADDR:8000/$F"
@@ -1,32 +1,22 @@
 #!/usr/bin/env python3
+import json
 import os
 import sys
 import yaml
 import jinja2
 
 
 # Read settings from user-provided settings file
 context = yaml.safe_load(open(sys.argv[1]))
 
-ips = list(open("ips.txt"))
-clustersize = context["clustersize"]
+context["logins"] = []
+for line in open("logins.jsonl"):
+    if line.strip():
+        context["logins"].append(json.loads(line))
 
 print("---------------------------------------------")
-print(" Number of IPs:   {}".format(len(ips)))
-print(" VMs per cluster: {}".format(clustersize))
+print(" Number of cards: {}".format(len(context["logins"])))
 print("---------------------------------------------")
 
-assert len(ips)%clustersize == 0
-
-clusters = []
-
-while ips:
-    cluster = ips[:clustersize]
-    ips = ips[clustersize:]
-    clusters.append(cluster)
-
-context["clusters"] = clusters
-
 template_file_name = context["cards_template"]
 template_file_path = os.path.join(
     os.path.dirname(__file__),
@@ -35,23 +25,23 @@ template_file_path = os.path.join(
     template_file_name
 )
 template = jinja2.Template(open(template_file_path).read())
-with open("ips.html", "w") as f:
+with open("cards.html", "w") as f:
     f.write(template.render(**context))
-print("Generated ips.html")
+print("Generated cards.html")
 
 
 try:
     import pdfkit
     paper_size = context["paper_size"]
     margin = {"A4": "0.5cm", "Letter": "0.2in"}[paper_size]
-    with open("ips.html") as f:
-        pdfkit.from_file(f, "ips.pdf", options={
+    with open("cards.html") as f:
+        pdfkit.from_file(f, "cards.pdf", options={
             "page-size": paper_size,
             "margin-top": margin,
             "margin-bottom": margin,
             "margin-left": margin,
             "margin-right": margin,
         })
-    print("Generated ips.pdf")
+    print("Generated cards.pdf")
 except ImportError:
-    print("WARNING: could not import pdfkit; did not generate ips.pdf")
+    print("WARNING: could not import pdfkit; did not generate cards.pdf")
@@ -17,6 +17,12 @@ pssh() {
 
   echo "[parallel-ssh] $@"
 
+  # There are some routers that really struggle with the number of TCP
+  # connections that we open when deploying large fleets of clusters.
+  # We're adding a 1 second delay here, but this can be cranked up if
+  # necessary - or down to zero, too.
+  sleep ${PSSH_DELAY_PRE-1}
+
   $(which pssh || which parallel-ssh) -h $HOSTFILE -l ubuntu \
     --par ${PSSH_PARALLEL_CONNECTIONS-100} \
     --timeout 300 \
prepare-labs/map-dns.sh (new executable file, +16)
@@ -0,0 +1,16 @@
#!/bin/sh

DOMAINS=domains.txt
IPS=ips.txt

. ./dns-cloudflare.sh

paste "$DOMAINS" "$IPS" | while read domain ips; do
  if ! [ "$domain" ]; then
    echo "⚠️ No more domains!"
    exit 1
  fi
  _clear_zone "$domain"
  _populate_zone "$domain" $ips
done
echo "✅ All done."
@@ -7,6 +7,7 @@ USER_LOGIN=k8s
 USER_PASSWORD=training
 
 STEPS="
   terraform
   wait
   standardize
   clusterize

@@ -7,6 +7,7 @@ USER_LOGIN=k8s
 USER_PASSWORD=training
 
 STEPS="
   terraform
   wait
   standardize
   clusterize

@@ -11,6 +11,7 @@ USER_LOGIN=k8s
 USER_PASSWORD=training
 
 STEPS="
   terraform
   wait
   standardize
   clusterize

@@ -7,9 +7,10 @@ USER_PASSWORD=training
 
 # For a list of old versions, check:
 # https://kubernetes.io/releases/patch-releases/#non-active-branch-history
-KUBEVERSION=1.22.5
+KUBEVERSION=1.28.9
 
 STEPS="
   terraform
   wait
   standardize
   clusterize

@@ -6,6 +6,7 @@ USER_LOGIN=k8s
 USER_PASSWORD=training
 
 STEPS="
   terraform
   wait
   standardize
   clusterize

@@ -6,6 +6,7 @@ USER_LOGIN=k8s
 USER_PASSWORD=training
 
 STEPS="
   terraform
   wait
   standardize
   clusterize

@@ -6,6 +6,7 @@ USER_LOGIN=docker
 USER_PASSWORD=training
 
 STEPS="
   terraform
   wait
   standardize
   clusterize
@@ -14,6 +15,5 @@ STEPS="
   createuser
   webssh
   tailhist
   cards
-  ips
 "

prepare-labs/settings/konk.env (new file, +6)
@@ -0,0 +1,6 @@
CLUSTERSIZE=5

USER_LOGIN=k8s
USER_PASSWORD=

STEPS="terraform stage2"

@@ -6,6 +6,7 @@ USER_LOGIN=k8s
 USER_PASSWORD=training
 
 STEPS="
   terraform
   wait
   standardize
   clusterize

@@ -7,6 +7,7 @@ USER_LOGIN=k8s
 USER_PASSWORD=training
 
 STEPS="
   terraform
   wait
   standardize
   clusterize

@@ -1,6 +1,4 @@
 CLUSTERSIZE=2
-
 USER_LOGIN=k8s
 USER_PASSWORD=
-
-STEPS="stage2"
+STEPS="terraform stage2"

@@ -1,3 +1,8 @@
+#export TF_VAR_node_size=GP2.4
+#export TF_VAR_node_size=g6-standard-6
+#export TF_VAR_node_size=m7i.xlarge
+
+
 CLUSTERSIZE=1
 
 CLUSTERPREFIX=CHANGEME
@@ -6,6 +11,7 @@ USER_LOGIN=portal
 USER_PASSWORD=CHANGEME
 
 STEPS="
   terraform
   wait
   standardize
   clusterize
@@ -7,7 +7,7 @@
{%- set url = url
    | default("http://FIXME.container.training/") -%}
{%- set pagesize = pagesize
    | default(9) -%}
    | default(10) -%}
{%- set lang = lang
    | default("en") -%}
{%- set event = event
@@ -15,79 +15,36 @@
{%- set backside = backside
    | default(False) -%}
{%- set image = image
    | default("kube") -%}
    | default(False) -%}
{%- set clusternumber = clusternumber
    | default(None) -%}
{%- if qrcode == True -%}
{%- set qrcode = "https://container.training/q" -%}
{%- elif qrcode -%}
{%- set qrcode = qrcode -%}
{%- endif -%}
{%- set thing = thing
    | default("lab environment") -%}

{# You can also set img_bottom_src instead. #}
{%- set img_logo_src = {
    "docker": "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png",
    "swarm": "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png",
    "kube": "https://avatars1.githubusercontent.com/u/13629408",
    "enix": "https://enix.io/static/img/logos/logo-domain-cropped.png",
}[image] -%}
{%- if lang == "en" and clustersize == 1 -%}
{%- set intro -%}
Here is the connection information to your very own
machine for this {{ event }}.
You can connect to this VM with any SSH client.
{%- endset -%}
{%- set listhead -%}
Your machine is:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" and clustersize != 1 -%}
{%- set intro -%}
Here is the connection information to your very own
cluster for this {{ event }}.
You can connect to each VM with any SSH client.
{%- endset -%}
{%- set listhead -%}
Your machines are:
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" and clustersize == 1 -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
machine pour cette formation.
Vous pouvez vous connecter à cette machine virtuelle
avec n'importe quel client SSH.
{%- endset -%}
{%- set listhead -%}
Adresse IP:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" and clusterprefix != "node" -%}
{%- set intro -%}
Here is the connection information for the
<strong>{{ clusterprefix }}</strong> environment.
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" and clustersize != 1 -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
cluster pour cette formation.
Vous pouvez vous connecter à chaque machine virtuelle
avec n'importe quel client SSH.
{%- endset -%}
{%- set listhead -%}
Adresses IP:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" -%}
{%- set slides_are_at -%}
You can find the slides at:
{%- endset -%}
{%- if lang == "en" -%}
{%- set intro -%}
Here is the connection information to your very own
{{ thing }} for this {{ event }}.
You can connect to it with any SSH client.
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set slides_are_at -%}
Le support de formation est à l'adresse suivante :
{%- endset -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
{{ thing }} pour cette formation.
Vous pouvez vous y connecter
avec n'importe quel client SSH.
{%- endset -%}
{%- endif -%}
{%- if lang == "en" -%}
{%- set slides_are_at -%}
You can find the slides at:
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set slides_are_at -%}
Le support de formation est à l'adresse suivante :
{%- endset -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
@@ -102,25 +59,21 @@
}
body {
  /* this is A4 minus 0.5cm margins */
  width: 20cm;
  height: 28.7cm;
  width: 20cm;
  height: 28.7cm;
}
{% elif paper_size == "Letter" %}
@page {
  size: Letter;
  margin: 0.2in;
  size: Letter; /* 8.5in x 11in */
}
body {
  /* this is Letter minus 0.2in margins */
  width: 8.6in;
  heigth: 10.6in;
  width: 6.75in; /* two cards wide */
  margin-left: 0.875in; /* (8.5in - 6.75in)/2 */
  margin-top: 0.1875in; /* (11in - 5 cards)/2 */
}
{% endif %}

body, table {
  margin: 0;
  padding: 0;
  line-height: 1em;
  font-size: 15px;
  font-family: 'Slabo 27px';
@@ -134,47 +87,45 @@ table {
  padding-left: 0.4em;
}

div {
td:first-child {
  width: 10.5em;
}

div.card {
  float: left;
  border: 1px dotted black;
  {% if backside %}
  height: 33%;
  {% endif %}
  /* columns * (width+left+right) < 100% */
  border: 0.01in dotted black;
  /*
  width: 24.8%;
  columns * (width+left+right) < 100%
  height: 33%;
  width: 24.8%;
  width: 33%;
  */
  /**/
  width: 33%;
  /**/
  width: 3.355in; /* 3.375in minus two 0.01in borders */
  height: 2.105in; /* 2.125in minus two 0.01in borders */
}

p {
  margin: 0.8em;
}

div.back {
  border: 1px dotted grey;
div.front {
  {% if image %}
  background-image: url("{{ image }}");
  background-repeat: no-repeat;
  background-size: 1in;
  background-position-x: 2.8in;
  background-position-y: center;
  {% endif %}
}

span.scale {
  white-space: nowrap;
}

img.logo {
  height: 4.5em;
  float: right;
}

img.bottom {
  height: 2.5em;
  display: block;
  margin: 0.5em auto;
  white-space: nowrap;
}

.qrcode img {
  width: 40%;
  margin: 1em;
  height: 5.8em;
  padding: 1em 1em 0.5em 1em;
  float: left;
}

.logpass {
@@ -189,101 +140,97 @@ img.bottom {
  height: 0;
}
</style>
<script type="text/javascript" src="https://cdn.rawgit.com/davidshimjs/qrcodejs/gh-pages/qrcode.min.js"></script>
<script type="text/javascript" src="qrcode.min.js"></script>
<script type="text/javascript">
function qrcodes() {
  [].forEach.call(
    document.getElementsByClassName("qrcode"),
    (e, index) => {
      new QRCode(e, {
        text: "{{ qrcode }}",
        correctLevel: QRCode.CorrectLevel.L
      });
    }
  );
  [].forEach.call(
    document.getElementsByClassName("qrcode"),
    (e, index) => {
      new QRCode(e, {
        text: "{{ qrcode }}",
        correctLevel: QRCode.CorrectLevel.L
      });
    }
  );
}

function scale() {
  [].forEach.call(
    document.getElementsByClassName("scale"),
    (e, index) => {
      var text_width = e.getBoundingClientRect().width;
      var box_width = e.parentElement.getBoundingClientRect().width;
      var percent = 100 * box_width / text_width + "%";
      e.style.fontSize = percent;
    }
  );
  [].forEach.call(
    document.getElementsByClassName("scale"),
    (e, index) => {
      var text_width = e.getBoundingClientRect().width;
      var box_width = e.parentElement.getBoundingClientRect().width;
      var percent = 100 * box_width / text_width + "%";
      e.style.fontSize = percent;
    }
  );
}
</script>
</head>
<body onload="qrcodes(); scale();">
{% for cluster in clusters %}
<div>
{% for login in logins %}
<div class="card front">
<p>{{ intro }}</p>
<p>
{% if img_logo_src %}
<img class="logo" src="{{ img_logo_src }}" />
{% endif %}
<table>
{% if clusternumber != None %}
<tr><td>cluster:</td></tr>
<tr><td class="logpass">{{ clusternumber + loop.index }}</td></tr>
{% endif %}
<tr><td>login:</td></tr>
<tr><td class="logpass">{{ user_login }}</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ user_password }}</td></tr>
</table>

</p>
<p>
{{ listhead }}
<table>
{% for node in cluster %}
<tr>
<td>{{ clusterprefix }}{{ loop.index }}:</td>
<td>{{ node }}</td>
</tr>
{% endfor %}
<tr>
<td>login:</td>
<td>password:</td>
</tr>
<tr>
<td class="logpass">{{ login.login }}</td>
<td class="logpass">{{ login.password }}</td>
</tr>
<tr>
<td>IP address:</td>
{% if login.port %}
<td>port:</td>
{% endif %}
</tr>
<tr>
<td class="logpass">{{ login.ipaddrs.split("\t")[0] }}</td>
{% if login.port %}
<td class="logpass">{{ login.port }}</td>
{% endif %}
</tr>
</table>
</p>

<p>
{% if url %}
{{ slides_are_at }}
{{ slides_are_at }}
<p>
<span class="scale">{{ url }}</span>
</p>
{% endif %}
{% if img_bottom_src %}
<img class="bottom" src="{{ img_bottom_src }}" />
{% endif %}
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}
<span class="pagebreak"></span>
{% if backside %}
{% for x in range(pagesize) %}
<div class="back">
<p>Thanks for attending
"Getting Started With Kubernetes and Container Orchestration"
during CONFERENCE in Month YYYY!</p>
<p>If you liked that workshop,
I can train your team, in person or
online, with custom courses of
any length and any level.
</p>
{% if qrcode %}
<p>If you're interested, please scan that QR code to contact me:</p>
<span class="qrcode"></span>
{% for x in range(pagesize) %}
<div class="card back">
{{ backside }}
{#
<p>Thanks for attending
"Getting Started With Kubernetes and Container Orchestration"
during CONFERENCE in Month YYYY!</p>
<p>If you liked that workshop,
I can train your team, in person or
online, with custom courses of
any length and any level.
</p>
{% if qrcode %}
<p>If you're interested, please scan that QR code to contact me:</p>
<span class="qrcode"></span>
{% else %}
<p>If you're interested, you can contact me at:</p>
{% endif %}
<p>jerome.petazzoni@gmail.com</p>
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
<p>If you're interested, you can contact me at:</p>
{% endif %}
<p>jerome.petazzoni@gmail.com</p>
#}
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
{% endif %}
{% endfor %}
</body>
19 prepare-labs/templates/cards.yaml Normal file
@@ -0,0 +1,19 @@
cards_template: cards.html
paper_size: Letter
url: https://2024-11-qconsf.container.training
event: workshop
backside: |
  <div class="qrcode"></div>
  <p>
  Thanks for attending the Asynchronous Architecture Patterns workshop at QCON!
  </p>
  <p>
  <b>This QR code will give you my contact info</b> as well as a link to a feedback form.
  </p>
  <p>
  If you liked this workshop, I can train your team, in person or online, with custom
  courses of any length and any level, on Docker, Kubernetes, and MLops.
  </p>
qrcode: https://2024-11-qconsf.container.training/#contact
thing: Kubernetes cluster
image: logo-kubernetes.png
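This configuration feeds the cards template above. A minimal rendering sketch (assumptions: a Python environment with `jinja2` and `pyyaml`, and the post-change version of `cards.html`; the real tooling lives elsewhere in prepare-labs, and the empty `clusters`/`logins` lists are placeholders for the Terraform outputs):

```bash
# Render cards.html with the values from cards.yaml (sketch only).
python3 -c '
import yaml, jinja2
config = yaml.safe_load(open("cards.yaml"))
template = jinja2.Template(open(config["cards_template"]).read())
# clusters/logins would normally come from the Terraform outputs.
print(template.render(clusters=[], logins=[], **config))
' > cards-rendered.html
```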
2 prepare-labs/terraform/list-locations/exoscale Executable file
@@ -0,0 +1,2 @@
#!/bin/sh
exo zone
@@ -8,8 +8,8 @@ resource "random_string" "_" {
resource "time_static" "_" {}

locals {
  min_nodes_per_pool = var.nodes_per_cluster
  max_nodes_per_pool = var.nodes_per_cluster * 2
  min_nodes_per_pool = var.min_nodes_per_cluster
  max_nodes_per_pool = var.max_nodes_per_cluster
  timestamp = formatdate("YYYY-MM-DD-hh-mm", time_static._.rfc3339)
  tag = random_string._.result
  # Common tags to be assigned to all resources
@@ -14,6 +14,20 @@ provider "kubernetes" {
  config_path = "./kubeconfig.${index}"
}

provider "helm" {
  alias = "cluster_${index}"
  kubernetes {
    config_path = "./kubeconfig.${index}"
  }
}

# Password used for SSH and code-server access
resource "random_string" "shpod_${index}" {
  length = 6
  special = false
  upper = false
}

resource "kubernetes_namespace" "shpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
@@ -21,120 +35,57 @@ resource "kubernetes_namespace" "shpod_${index}" {
  }
}

resource "kubernetes_deployment" "shpod_${index}" {
data "kubernetes_service" "shpod_${index}" {
  depends_on = [ helm_release.shpod_${index} ]
  provider = kubernetes.cluster_${index}
  metadata {
    name = "shpod"
    namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
  }
  spec {
    selector {
      match_labels = {
        app = "shpod"
      }
    }
    template {
      metadata {
        labels = {
          app = "shpod"
        }
      }
      spec {
        service_account_name = "shpod"
        container {
          image = "jpetazzo/shpod"
          name = "shpod"
          env {
            name = "PASSWORD"
            value = random_string.shpod_${index}.result
          }
          lifecycle {
            post_start {
              exec {
                command = [ "sh", "-c", "curl http://myip.enix.org/REMOTE_ADDR > /etc/HOSTIP || true" ]
              }
            }
          }
          resources {
            limits = {
              cpu = "2"
              memory = "500M"
            }
            requests = {
              cpu = "100m"
              memory = "250M"
            }
          }
        }
      }
    }
  }
}

resource "kubernetes_service" "shpod_${index}" {
  provider = kubernetes.cluster_${index}
  lifecycle {
    # Folks might alter their shpod Service to expose extra ports.
    # Don't reset their changes.
    ignore_changes = [ spec ]
  }
  metadata {
    name = "shpod"
    namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
  }
  spec {
    selector = {
      app = "shpod"
    }
    port {
      name = "ssh"
      port = 22
      target_port = 22
    }
    type = "NodePort"
  }
}

resource "kubernetes_service_account" "shpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name = "shpod"
    namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
  }
}

resource "kubernetes_cluster_role_binding" "shpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name = "shpod"
  }
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind = "ClusterRole"
    name = "cluster-admin"
  }
  subject {
    kind = "ServiceAccount"
    name = "shpod"
    namespace = "shpod"
  }
  subject {
    api_group = "rbac.authorization.k8s.io"
    kind = "Group"
    name = "shpod-cluster-admins"
  }

resource "helm_release" "shpod_${index}" {
  provider = helm.cluster_${index}
  repository = "https://shpod.in"
  chart = "shpod"
  name = "shpod"
  namespace = "shpod"
  create_namespace = false
  set {
    name = "service.type"
    value = "NodePort"
  }
}

resource "random_string" "shpod_${index}" {
  length = 6
  special = false
  upper = false
}

provider "helm" {
  alias = "cluster_${index}"
  kubernetes {
    config_path = "./kubeconfig.${index}"
  set {
    name = "resources.requests.cpu"
    value = "100m"
  }
  set {
    name = "resources.requests.memory"
    value = "500M"
  }
  set {
    name = "resources.limits.cpu"
    value = "1"
  }
  set {
    name = "resources.limits.memory"
    value = "1000M"
  }
  set {
    name = "persistentVolume.enabled"
    value = "true"
  }
  set {
    name = "ssh.password"
    value = random_string.shpod_${index}.result
  }
  set {
    name = "rbac.cluster.clusterRoles"
    value = "{cluster-admin}"
  }
  set {
    name = "codeServer.enabled"
    value = "true"
  }
}
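For reference, a roughly equivalent `helm` CLI invocation (a sketch; the chart, repo, and value names are taken from the release above, but `$PASSWORD` is a placeholder for the generated random string):

```bash
helm install shpod shpod \
  --repo https://shpod.in \
  --namespace shpod \
  --set service.type=NodePort \
  --set resources.requests.cpu=100m \
  --set persistentVolume.enabled=true \
  --set ssh.password="$PASSWORD" \
  --set rbac.cluster.clusterRoles='{cluster-admin}' \
  --set codeServer.enabled=true
```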
@@ -156,6 +107,36 @@ resource "helm_release" "metrics_server_${index}" {
  }
}

# This section here deserves a little explanation.
#
# When we access a cluster with shpod (either through SSH or code-server)
# there is no kubeconfig file - we simply use "in-cluster" authentication
# with a ServiceAccount token. This is a bit unusual, and ideally, I would
# prefer to have a "normal" kubeconfig file in the students' shell.
#
# So what we're doing here, is that we're populating a ConfigMap with
# a kubeconfig file; and in the initialization scripts (e.g. bashrc) we
# automatically download the kubeconfig file from the ConfigMap and place
# it in ~/.kube/kubeconfig.
#
# But, which kubeconfig file should we use? We could use the "normal"
# kubeconfig file that was generated by the provider; but in some cases,
# that kubeconfig file might use a token instead of a certificate for
# user authentication - and ideally, I would like to have a certificate
# so that in the section about auth and RBAC, we can dissect that TLS
# certificate and explain where our permissions come from.
#
# So we're creating a TLS key pair; using the CSR API to issue a user
# certificate belonging to a special group; granting the cluster-admin
# role to that group; then we use the kubeconfig file generated by the
# provider but override the user with that TLS key pair.
#
# This is not strictly necessary but it streamlines the lesson on auth.
#
# Lastly - in the ConfigMap we actually put both the original kubeconfig,
# and the one where we injected our new user (just in case we want to
# use or look at the original for any reason).
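The student-side half of that mechanism could look like this (a sketch only; the exact ConfigMap name and data key are assumptions based on the resource below):

```bash
# Hypothetical bashrc snippet: pull the kubeconfig out of the ConfigMap
# and install it where the comments above say it should live.
mkdir -p ~/.kube
kubectl get configmap kubeconfig \
  --output "jsonpath={.data.kubeconfig}" > ~/.kube/kubeconfig
export KUBECONFIG=~/.kube/kubeconfig
```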
resource "kubernetes_config_map" "kubeconfig_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
@@ -202,6 +183,23 @@ resource "tls_cert_request" "cluster_admin_${index}" {
  }
}

resource "kubernetes_cluster_role_binding" "shpod_cluster_admin_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name = "shpod-cluster-admin"
  }
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind = "ClusterRole"
    name = "cluster-admin"
  }
  subject {
    api_group = "rbac.authorization.k8s.io"
    kind = "Group"
    name = "shpod-cluster-admins"
  }
}

resource "kubernetes_certificate_signing_request_v1" "cluster_admin_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
@@ -217,16 +215,28 @@ resource "kubernetes_certificate_signing_request_v1" "cluster_admin_${index}" {

%{ endfor ~}

output "ip_addresses_of_nodes" {
output "ips_txt" {
  value = join("\n", [
%{ for index, cluster in clusters ~}
    join("\t", concat(
      [
        random_string.shpod_${index}.result,
        "ssh -l k8s -p $${kubernetes_service.shpod_${index}.spec[0].port[0].node_port}"
      ],
    join("\n", concat(
      split(" ", file("./externalips.${index}"))
    )),
%{ endfor ~}
    ""
  ])
}

output "logins_jsonl" {
  value = join("\n", [
%{ for index, cluster in clusters ~}
    jsonencode({
      login = "k8s",
      password = random_string.shpod_${index}.result,
      port = data.kubernetes_service.shpod_${index}.spec[0].port[0].node_port,
      codeServerPort = data.kubernetes_service.shpod_${index}.spec[0].port[1].node_port,
      ipaddrs = replace(file("./externalips.${index}"), " ", "\t"),
    }),
%{ endfor ~}
    ""
  ])
}
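Once applied, these outputs can be consumed from the shell, for instance (a sketch; assumes `jq` is installed and the module has been applied in the current directory):

```bash
# Dump the tab-separated node list, one cluster per line.
terraform output -raw ips_txt > ips.txt

# Pretty-print the first login record (one JSON object per line).
terraform output -raw logins_jsonl | head -n 1 | jq .
```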
@@ -7,11 +7,16 @@ variable "how_many_clusters" {
  default = 2
}

variable "nodes_per_cluster" {
variable "min_nodes_per_cluster" {
  type = number
  default = 2
}

variable "max_nodes_per_cluster" {
  type = number
  default = 4
}

variable "node_size" {
  type = string
  default = "M"
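These defaults can be overridden at apply time, e.g. (a sketch; run from the module directory):

```bash
terraform apply \
  -var how_many_clusters=2 \
  -var min_nodes_per_cluster=2 \
  -var max_nodes_per_cluster=4 \
  -var node_size=M
```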
@@ -1,10 +1,23 @@
resource "scaleway_vpc_private_network" "_" {
}

# This is a kind of hack to use a custom security group with Kapsule.
# See https://www.scaleway.com/en/docs/containers/kubernetes/reference-content/secure-cluster-with-private-network/

resource "scaleway_instance_security_group" "_" {
  name = "kubernetes ${split("/", scaleway_k8s_cluster._.id)[1]}"
  inbound_default_policy = "accept"
  outbound_default_policy = "accept"
}

resource "scaleway_k8s_cluster" "_" {
  name = var.cluster_name
  #region = var.location
  name = var.cluster_name
  tags = var.common_tags
  version = local.k8s_version
  type = "kapsule"
  cni = "cilium"
  delete_additional_resources = true
  private_network_id = scaleway_vpc_private_network._.id
}

resource "scaleway_k8s_pool" "_" {
@@ -17,6 +30,7 @@ resource "scaleway_k8s_pool" "_" {
  max_size = var.max_nodes_per_pool
  autoscaling = var.max_nodes_per_pool > var.min_nodes_per_pool
  autohealing = true
  depends_on = [ scaleway_instance_security_group._ ]
}

data "scaleway_k8s_version" "_" {

@@ -4,6 +4,7 @@ resource "helm_release" "_" {
  create_namespace = true
  repository = "https://charts.loft.sh"
  chart = "vcluster"
  version = "0.19.7"
  set {
    name = "service.type"
    value = "NodePort"

@@ -14,9 +14,9 @@ $ hcloud server-type list | grep shared
variable "node_sizes" {
  type = map(any)
  default = {
    S = "cx11"
    M = "cx21"
    L = "cx31"
    S = "cpx11"
    M = "cpx21"
    L = "cpx31"
  }
}
30 prepare-labs/terraform/providers/proxmox/config.tf Normal file
@@ -0,0 +1,30 @@
variable "proxmox_endpoint" {
  type = string
  default = "https://localhost:8006/"
}

variable "proxmox_username" {
  type = string
  default = null
}

variable "proxmox_password" {
  type = string
  default = null
}

variable "proxmox_storage" {
  type = string
  default = "local"
}

variable "proxmox_template_node_name" {
  type = string
  default = null
}

variable "proxmox_template_vm_id" {
  type = number
  default = null
}
11 prepare-labs/terraform/providers/proxmox/variables.tf Normal file
@@ -0,0 +1,11 @@
# Since node size needs to be a string...
# To indicate number of CPUs + RAM, just pass it as a string with a space between them.
# RAM is in megabytes.
variable "node_sizes" {
  type = map(any)
  default = {
    S = "1 2048"
    M = "2 4096"
    L = "3 8192"
  }
}
@@ -56,6 +56,7 @@ locals {
  cluster_name = format("%s-%03d", var.tag, cn[0])
  node_name = format("%s-%03d-%03d", var.tag, cn[0], cn[1])
  node_size = lookup(var.node_sizes, var.node_size, var.node_size)
  node_index = cn[0] * var.nodes_per_cluster + cn[1]
    }
  }
}
@@ -71,10 +72,10 @@ resource "local_file" "ip_addresses" {
resource "local_file" "clusters" {
  content = join("", formatlist("%s\n", [
    for cid in range(1, 1 + var.how_many_clusters) :
    join(" ",
    join("\t",
      [for nid in range(1, 1 + var.nodes_per_cluster) :
        local.ip_addresses[format("c%03dn%03d", cid, nid)]
      ])]))
  filename = "clusters.txt"
  filename = "clusters.tsv"
  file_permission = "0600"
}

@@ -13,7 +13,7 @@ data "openstack_images_image_v2" "_" {
  most_recent = true
  properties = {
    os = "ubuntu"
    version = "22.04"
    version = "24.04"
  }
}
1 prepare-labs/terraform/virtual-machines/proxmox/common.tf Symbolic link
@@ -0,0 +1 @@
../common.tf
1 prepare-labs/terraform/virtual-machines/proxmox/config.tf Symbolic link
@@ -0,0 +1 @@
../../providers/proxmox/config.tf
79 prepare-labs/terraform/virtual-machines/proxmox/main.tf Normal file
@@ -0,0 +1,79 @@
data "proxmox_virtual_environment_nodes" "_" {}

locals {
  pve_nodes = data.proxmox_virtual_environment_nodes._.names
}

resource "proxmox_virtual_environment_vm" "_" {
  node_name = local.pve_nodes[each.value.node_index % length(local.pve_nodes)]
  for_each = local.nodes
  name = each.value.node_name
  tags = ["container.training", var.tag]
  stop_on_destroy = true
  cpu {
    cores = split(" ", each.value.node_size)[0]
    type = "x86-64-v2-AES" # recommended for modern CPUs
  }
  memory {
    dedicated = split(" ", each.value.node_size)[1]
  }
  #disk {
  #  datastore_id = var.proxmox_storage
  #  file_id = proxmox_virtual_environment_file._.id
  #  interface = "scsi0"
  #  size = 30
  #  discard = "on"
  #}
  clone {
    vm_id = var.proxmox_template_vm_id
    node_name = var.proxmox_template_node_name
    full = false
  }
  agent {
    enabled = true
  }
  initialization {
    datastore_id = var.proxmox_storage
    user_account {
      username = "ubuntu"
      keys = [trimspace(tls_private_key.ssh.public_key_openssh)]
    }
    ip_config {
      ipv4 {
        address = "dhcp"
        #gateway =
      }
    }
  }
  network_device {
    bridge = "vmbr0"
  }
  operating_system {
    type = "l26"
  }
}

#resource "proxmox_virtual_environment_download_file" "ubuntu_2404_20250115" {
#  content_type = "iso"
#  datastore_id = "cephfs"
#  node_name = "pve-lsd-1"
#  url = "https://cloud-images.ubuntu.com/releases/24.04/release-20250115/ubuntu-24.04-server-cloudimg-amd64.img"
#  file_name = "ubuntu_2404_20250115.img"
#}
#
#resource "proxmox_virtual_environment_file" "_" {
#  datastore_id = "cephfs"
#  node_name = "pve-lsd-1"
#  source_file {
#    path = "/root/noble-server-cloudimg-amd64.img"
#  }
#}

locals {
  ip_addresses = {
    for key, value in local.nodes :
    key => [for addr in flatten(concat(proxmox_virtual_environment_vm._[key].ipv4_addresses, ["ERROR"])) :
      addr if addr != "127.0.0.1"][0]
  }
}
15 prepare-labs/terraform/virtual-machines/proxmox/provider.tf Normal file
@@ -0,0 +1,15 @@
terraform {
  required_providers {
    proxmox = {
      source = "bpg/proxmox"
      version = "~> 0.70.1"
    }
  }
}

provider "proxmox" {
  endpoint = var.proxmox_endpoint
  username = var.proxmox_username
  password = var.proxmox_password
  insecure = true
}
@@ -0,0 +1,17 @@
# If you want to deploy to Proxmox, you need to:
# 1) copy that file to e.g. myproxmoxcluster.tfvars
# 2) make sure you have a VM template with QEMU agent pre-installed
# 3) customize the copy (you need to replace all the CHANGEME values)
# 4) deploy with "labctl create --provider proxmox/myproxmoxcluster ..."

proxmox_endpoint = "https://localhost:8006/"
proxmox_username = "terraform@pve"
proxmox_password = "CHANGEME"

# Which storage to use for VM disks. Defaults to "local".
#proxmox_storage = "ceph"

proxmox_template_node_name = "CHANGEME"
proxmox_template_vm_id = CHANGEME
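Spelled out as shell steps, the workflow from the comments above might look like this (a sketch; the example file name is an assumption, and the trailing arguments to `labctl create` are elided in the original and depend on your setup):

```bash
cp example.tfvars myproxmoxcluster.tfvars   # hypothetical source file name
$EDITOR myproxmoxcluster.tfvars             # replace every CHANGEME value
labctl create --provider proxmox/myproxmoxcluster ...
```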
1 prepare-labs/terraform/virtual-machines/proxmox/variables.tf Symbolic link
@@ -0,0 +1 @@
../../providers/proxmox/variables.tf
555 prepare-labs/www/logo-bento.svg Normal file (After: 81 KiB)
BIN prepare-labs/www/logo-kubernetes.png Normal file (After: 31 KiB)
1 prepare-labs/www/qrcode.min.js vendored Normal file
@@ -16,7 +16,7 @@

# Shortlinks for next training in English and French
#/next https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428
/next https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
/next https://qconsf.com/training/nov2024/asynchronous-architecture-patterns-scale-ml-and-other-high-latency-workloads
/hi5 https://enix.io/fr/services/formation/online/
/us https://www.ardanlabs.com/live-training-events/deploying-microservices-and-traditional-applications-with-kubernetes-march-28-2022.html
/uk https://skillsmatter.com/workshops/827-deploying-microservices-and-traditional-applications-with-kubernetes-with-jerome-petazzoni

814 slides/autopilot/package-lock.json generated
@@ -2,8 +2,8 @@
"name": "container-training-pub-sub-server",
"version": "0.0.1",
"dependencies": {
  "express": "^4.16.2",
  "socket.io": "^4.6.1",
  "socket.io-client": "^4.5.1"
  "express": "^4.21.1",
  "socket.io": "^4.8.0",
  "socket.io-client": "^4.7.5"
}
}
@@ -1,5 +1,3 @@
version: "2"

services:
  www:
    image: nginx
@@ -40,7 +40,7 @@

- In multi-stage builds, all stages can be built in parallel

  (example: https://github.com/jpetazzo/shpod; [before] and [after])
  (example: https://github.com/jpetazzo/shpod; [before][shpod-before-parallel] and [after][shpod-after-parallel])

- Stages are built only when they are necessary

@@ -50,8 +50,8 @@

- Files are cached in the builder

[before]: https://github.com/jpetazzo/shpod/blob/c6efedad6d6c3dc3120dbc0ae0a6915f85862474/Dockerfile
[after]: https://github.com/jpetazzo/shpod/blob/d20887bbd56b5fcae2d5d9b0ce06cae8887caabf/Dockerfile
[shpod-before-parallel]: https://github.com/jpetazzo/shpod/blob/c6efedad6d6c3dc3120dbc0ae0a6915f85862474/Dockerfile
[shpod-after-parallel]: https://github.com/jpetazzo/shpod/blob/d20887bbd56b5fcae2d5d9b0ce06cae8887caabf/Dockerfile

---

@@ -121,10 +121,10 @@ docker buildx build … \

- Must not use binary downloads with hard-coded architectures!

  (streamlining a Dockerfile for multi-arch: [before], [after])
  (streamlining a Dockerfile for multi-arch: [before][shpod-before-multiarch], [after][shpod-after-multiarch])

[before]: https://github.com/jpetazzo/shpod/blob/d20887bbd56b5fcae2d5d9b0ce06cae8887caabf/Dockerfile
[after]: https://github.com/jpetazzo/shpod/blob/c50789e662417b34fea6f5e1d893721d66d265b7/Dockerfile
[shpod-before-multiarch]: https://github.com/jpetazzo/shpod/blob/d20887bbd56b5fcae2d5d9b0ce06cae8887caabf/Dockerfile
[shpod-after-multiarch]: https://github.com/jpetazzo/shpod/blob/c50789e662417b34fea6f5e1d893721d66d265b7/Dockerfile

---

@@ -32,7 +32,7 @@ Compose enables a simple, powerful onboarding workflow:

1. Checkout our code.

2. Run `docker-compose up`.
2. Run `docker compose up`.

3. Our app is up and running!

@@ -66,19 +66,19 @@ class: pic

1. Write Dockerfiles

2. Describe our stack of containers in a YAML file called `docker-compose.yml`
2. Describe our stack of containers in a YAML file (the "Compose file")

3. `docker-compose up` (or `docker-compose up -d` to run in the background)
3. `docker compose up` (or `docker compose up -d` to run in the background)

4. Compose pulls and builds the required images, and starts the containers

5. Compose shows the combined logs of all the containers

   (if running in the background, use `docker-compose logs`)
   (if running in the background, use `docker compose logs`)

6. Hit Ctrl-C to stop the whole stack

   (if running in the background, use `docker-compose stop`)
   (if running in the background, use `docker compose stop`)

---

@@ -86,11 +86,11 @@ class: pic

After making changes to our source code, we can:

1. `docker-compose build` to rebuild container images
1. `docker compose build` to rebuild container images

2. `docker-compose up` to restart the stack with the new images
2. `docker compose up` to restart the stack with the new images

We can also combine both with `docker-compose up --build`
We can also combine both with `docker compose up --build`

Compose will be smart, and only recreate the containers that have changed.
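To make that workflow concrete, here is a minimal sketch (the service names, image, and port mapping are illustrative, and it assumes a Dockerfile in the current directory):

```bash
# Write a tiny two-service Compose file, then run the edit/rebuild loop.
cat > compose.yml <<'EOF'
services:
  www:
    build: .
    ports:
      - "8000:5000"
  redis:
    image: redis
EOF

docker compose up -d --build   # build images and start the stack
docker compose logs --tail=10  # peek at the combined logs
docker compose down            # stop and remove everything
```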
@@ -114,7 +114,7 @@ cd trainingwheels

Second step: start the app.

```bash
docker-compose up
docker compose up
```

Watch Compose build and run the app.
@@ -141,7 +141,17 @@ After ten seconds (or if we press `^C` again) it will forcibly kill them.

---

## The `docker-compose.yml` file
## The Compose file

* Historically: docker-compose.yml or .yaml

* Recently (kind of): can also be named compose.yml or .yaml

  (Since [version 1.28.6, March 2021](https://docs.docker.com/compose/releases/release-notes/#1286))

---

## Example

Here is the file used in the demo:

@@ -172,10 +182,10 @@ services:

A Compose file has multiple sections:

* `version` is mandatory. (Typically use "3".)

* `services` is mandatory. Each service corresponds to a container.

* `version` is optional (it used to be mandatory). It can be ignored.

* `networks` is optional and indicates to which networks containers should be connected.
  <br/>(By default, containers will be connected on a private, per-compose-file network.)

@@ -183,24 +193,24 @@ A Compose file has multiple sections:

---

class: extra-details

## Compose file versions

* Version 1 is legacy and shouldn't be used.

  (If you see a Compose file without `version` and `services`, it's a legacy v1 file.)
  (If you see a Compose file without a `services` block, it's a legacy v1 file.)

* Version 2 added support for networks and volumes.

* Version 3 added support for deployment options (scaling, rolling updates, etc).

* Typically use `version: "3"`.

The [Docker documentation](https://docs.docker.com/compose/compose-file/)
has excellent information about the Compose file format if you need to know more about versions.

---

## Containers in `docker-compose.yml`
## Containers in the Compose file

Each service in the YAML file must contain either `build`, or `image`.

@@ -278,7 +288,7 @@ For the full list, check: https://docs.docker.com/compose/compose-file/

`frontcopy_www`, `frontcopy_www_1`, `frontcopy_db_1`

- Alternatively, use `docker-compose -p frontcopy`
- Alternatively, use `docker compose -p frontcopy`

  (to set the `--project-name` of a stack, which defaults to the dir name)

@@ -288,10 +298,10 @@ For the full list, check: https://docs.docker.com/compose/compose-file/

## Checking stack status

We have `ps`, `docker ps`, and similarly, `docker-compose ps`:
We have `ps`, `docker ps`, and similarly, `docker compose ps`:

```bash
$ docker-compose ps
$ docker compose ps
Name                     Command             State   Ports
----------------------------------------------------------------------------
trainingwheels_redis_1   /entrypoint.sh red  Up      6379/tcp
@@ -310,13 +320,13 @@ If you have started your application in the background with Compose and
want to stop it easily, you can use the `kill` command:

```bash
$ docker-compose kill
$ docker compose kill
```

Likewise, `docker-compose rm` will let you remove containers (after confirmation):
Likewise, `docker compose rm` will let you remove containers (after confirmation):

```bash
$ docker-compose rm
$ docker compose rm
Going to remove trainingwheels_redis_1, trainingwheels_www_1
Are you sure? [yN] y
Removing trainingwheels_redis_1...
@@ -327,19 +337,19 @@ Removing trainingwheels_www_1...

## Cleaning up (2)

Alternatively, `docker-compose down` will stop and remove containers.
Alternatively, `docker compose down` will stop and remove containers.

It will also remove other resources, like networks that were created for the application.

```bash
$ docker-compose down
$ docker compose down
Stopping trainingwheels_www_1 ... done
Stopping trainingwheels_redis_1 ... done
Removing trainingwheels_www_1 ... done
Removing trainingwheels_redis_1 ... done
```

Use `docker-compose down -v` to remove everything including volumes.
Use `docker compose down -v` to remove everything including volumes.
---

@@ -369,15 +379,15 @@ Use `docker-compose down -v` to remove everything including volumes.

- If the container is deleted, the volume gets orphaned

- Example: `docker-compose down && docker-compose up`
- Example: `docker compose down && docker compose up`

  - the old volume still exists, detached from its container

  - a new volume gets created

- `docker-compose down -v`/`--volumes` deletes volumes
- `docker compose down -v`/`--volumes` deletes volumes

  (but **not** `docker-compose down && docker-compose down -v`!)
  (but **not** `docker compose down && docker compose down -v`!)

---

@@ -396,9 +406,9 @@ volumes:

- Volume will be named `<project>_data`

- It won't be orphaned with `docker-compose down`
- It won't be orphaned with `docker compose down`

- It will correctly be removed with `docker-compose down -v`
- It will correctly be removed with `docker compose down -v`

---

@@ -417,7 +427,7 @@ services:

  (for migration, backups, disk usage accounting...)

- Won't be removed by `docker-compose down -v`
- Won't be removed by `docker compose down -v`

---

@@ -451,7 +461,7 @@ services:

- This is used when bringing up individual services

  (e.g. `docker-compose up blah` or `docker-compose run foo`)
  (e.g. `docker compose up blah` or `docker compose run foo`)

⚠️ It doesn't make a service "wait" for another one to be up!

@@ -471,7 +481,9 @@ class: extra-details

- `docker compose` command to deploy Compose stacks to some clouds

- progressively getting feature parity with `docker-compose`
- in Go instead of Python

- progressively getting feature parity with `docker compose`

- also provides numerous improvements (e.g. leverages BuildKit by default)

@@ -120,11 +120,11 @@ class: extra-details

  (and won't end up in the resulting image)

- See the [documentation] for the little details
- See the [documentation][dockerignore] for the little details

  (exceptions can be made with `!`, multiple directory levels with `**`...)

[documentation]: https://docs.docker.com/engine/reference/builder/#dockerignore-file
[dockerignore]: https://docs.docker.com/engine/reference/builder/#dockerignore-file

???

@@ -1,4 +1,4 @@
## Exercise — Ingress
## Exercise — Ingress Controller

- Add an ingress controller to a Kubernetes cluster

@@ -1,4 +1,4 @@
# Exercise — Ingress
# Exercise — Ingress Controller

- We want to expose a couple of web apps through an ingress controller

@@ -128,4 +128,4 @@ This is similar to the previous scenario, but with two significant changes:

1. We only want to run the ingress controller on nodes that have the role `ingress`.

2. We don't want to use `hostNetwork`, but a list of `externalIPs` instead.
2. We want to either use `hostPort`, or a list of `externalIPs` (not `hostNetwork`).

51 slides/exercises/monokube-details.md Normal file
@@ -0,0 +1,51 @@
# Exercise — Monokube static pods

- We want to run a very basic Kubernetes cluster by starting only:

  - kubelet

  - a container engine (e.g. Docker)

- The other components (control plane and otherwise) should be started with:

  - static pods

  - "classic" manifests loaded with e.g. `kubectl apply`

- This should be done with the "monokube" VM

  (which has Docker and kubelet 1.19 binaries available)

---

## Images to use

Here are some suggestions of images:

- etcd → `quay.io/coreos/etcd:vX.Y.Z`

- Kubernetes components → `registry.k8s.io/kube-XXX:vX.Y.Z`

  (where `XXX` = `apiserver`, `scheduler`, `controller-manager`)

To know which versions to use, check the version of the binaries installed on the `monokube` VM, and use the same ones.

See next slide for more hints!

---

## Inventory

We'll need to run:

- kubelet (with the flag for static pod manifests; see the sketch below)

- Docker

- static pods for control plane components

  (suggestion: use `hostNetwork`)

- static pod or DaemonSet for `kube-proxy`

  (will require a privileged security context)
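A sketch of the kubelet invocation (`--pod-manifest-path` is a real kubelet flag; the directory path and the rest of the configuration are assumptions):

```bash
# Tell kubelet to watch a directory for static pod manifests.
mkdir -p /etc/kubernetes/manifests
kubelet --pod-manifest-path=/etc/kubernetes/manifests
```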
@@ -1,6 +1,6 @@
# Exercise — Network Policies

We want to to implement a generic network security mechanism.
We want to implement a generic network security mechanism.

Instead of creating one policy per service, we want to
create a fixed number of policies, and use a single label

11 slides/exercises/polykuberbac-brief.md Normal file
@@ -0,0 +1,11 @@
## Exercise — Enable RBAC

- Enable RBAC on a manually-deployed control plane

- This involves:

  - generating different certificates

  - distributing the certificates to the controllers

  - enabling the proper authorizers in the API server

117 slides/exercises/polykuberbac-details.md Normal file
@@ -0,0 +1,117 @@
# Exercise — Enable RBAC

- We want to enable RBAC on the "polykube" cluster

  (it doesn't matter whether we have 1 or multiple nodes)

- Ideally, we want to have, for instance:

  - one key, certificate, and kubeconfig for a cluster admin

  - one key, certificate, and kubeconfig for a user
    <br/>
    (with permissions in a single namespace)

- Bonus points: enable the NodeAuthorizer too!

- Check the following slides for hints

---

## Step 1

- Enable RBAC itself!

--

- This is done with an API server command-line flag

--

- Check [the documentation][kube-apiserver-doc] to see the flag

--

- For now, only enable `--authorization-mode=RBAC`

[kube-apiserver-doc]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/

---

## Step 2

- Our certificate doesn't work anymore, we need to generate a new one

--

- We need a certificate that will have *some* (ideally *all*) permissions

--

- Two options:

  - use the equivalent of "root" (identity that completely skips permission checks)

  - a "non-root" identity but which is granted permissions with RBAC

--

- The "non-root" option looks nice, but to grant permissions, we need permissions

- So let's start with the equivalent of "root"!

--

- The Kubernetes equivalent of `root` is the group `system:masters`

---

## Step 2, continued

- We need to generate a certificate for a user belonging to group `system:masters`

--

- In Kubernetes certificates, groups are encoded with the "organization" field

--

- That corresponds to `O=system:masters`

--

- In other words we need to generate a new certificate, but with a subject of:

  `/CN=admin/O=system:masters/` (the `CN` doesn't matter)

- That certificate should be able to interact with the API server, like before
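For instance, with `openssl` (a sketch; it assumes the cluster CA key pair is available as `ca.crt`/`ca.key`, which depends on how the control plane was deployed):

```bash
# Generate a key and a CSR with the magic subject...
openssl genrsa -out admin.key 2048
openssl req -new -key admin.key -subj "/CN=admin/O=system:masters" -out admin.csr
# ...and sign it with the cluster CA.
openssl x509 -req -in admin.csr -CA ca.crt -CAkey ca.key -CAcreateserial \
        -days 365 -out admin.crt
```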
---

## Step 3

- Now, all our controllers have permissions issues

- We need to either:

  - use that `system:masters` cert everywhere

  - generate different certs for every controller, with the proper identities

- Suggestion: use `system:masters` everywhere to begin with

  (and make sure the cluster is back on its feet)

---

## Step 4

At this point, there are two possible forks in the road:

1. Generate certs for the control plane controllers

   (`kube-controller-manager`, `kube-scheduler`)

2. Generate cert(s) for the node(s) and enable `NodeAuthorizer`

Good luck!

7 slides/exercises/reqlim-brief.md Normal file
@@ -0,0 +1,7 @@
## Exercise — Requests and Limits

- Check current resource allocation and utilization

- Make sure that all workloads have requests (and perhaps limits)

- Make sure that all *future* workloads have them, too!

55 slides/exercises/reqlim-details.md Normal file
@@ -0,0 +1,55 @@
# Exercise — Requests and Limits

By default, if we don't specify *resource requests*,
our workloads will run in `BestEffort` quality of service.

`BestEffort` is very bad for production workloads,
because the scheduler has no idea of the actual resource
requirements of our apps, and won't be able to make
smart decisions about workload placement.

As a result, when the cluster gets overloaded,
containers will be killed, pods will be evicted,
and service disruptions will happen.

Let's solve this!

---

## Check current state

- Check *allocations*

  (i.e. which pods have requests and limits for CPU and memory)

- Then check *utilization*

  (i.e. actual resource usage)

- Possible tools: `kubectl`, plugins like `view-allocations`, Prometheus... (see the sketch below)
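A couple of starting points (a sketch; `kubectl top` assumes metrics-server is installed, and `view-allocations` is a separate kubectl plugin):

```bash
# Allocations: show CPU/memory requests per pod, across all namespaces.
kubectl get pods --all-namespaces -o custom-columns=\
NAMESPACE:.metadata.namespace,NAME:.metadata.name,\
CPU_REQ:.spec.containers[*].resources.requests.cpu,\
MEM_REQ:.spec.containers[*].resources.requests.memory

# Utilization: actual usage, as reported by metrics-server.
kubectl top pods --all-namespaces
```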
---

## Follow best practices

- We want to make sure that *all* workloads have requests

  (and perhaps limits, too!)

- Depending on the workload:

  - edit its YAML manifest

  - adjust its Helm values

  - add a LimitRange in its Namespace

- Then check again to confirm that the job has been done properly!

---

## Be future-proof!

- We want to make sure that *future* workloads will have requests, too

- How can that be implemented? (one possible answer is sketched below)
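One possible answer (a sketch; the namespace and values are illustrative): a LimitRange injects default requests and limits into any container created without them.

```bash
kubectl create namespace dev
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: LimitRange
metadata:
  name: default-requests
  namespace: dev
spec:
  limits:
  - type: Container
    defaultRequest:     # applied when a container has no requests
      cpu: 100m
      memory: 128Mi
    default:            # applied when a container has no limits
      cpu: "1"
      memory: 512Mi
EOF
```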
86 slides/exercises/yaml-bluegreen-details.md Normal file
@@ -0,0 +1,86 @@
# Exercise — Writing blue/green YAML

- We want to author YAML manifests for the "color" app

  (use image `jpetazzo/color` or `ghcr.io/jpetazzo/color`)

- That app serves web requests on port 80

- We want to deploy two instances of that app (`blue` and `green`)

- We want to expose the app with a service named `front`, such that:

  90% of the requests are sent to `blue`, and 10% to `green`

---

## End goal

- We want to be able to do something like:
  ```bash
  kubectl apply -f blue-green-demo.yaml
  ```

- Then connect to the `front` service and see responses from `blue` and `green`

- Then measure e.g. on 100 requests how many go to `blue` and `green`

  (we want a 90/10 traffic split)

- Go ahead, or check the next slides for hints!

---

## Step 1

- Test the app in isolation:

  - create a Deployment called `blue`

  - expose it with a Service

  - connect to the service and see a "blue" reply

- If you use a `ClusterIP` service:

  - if you're logged in directly on the cluster, you can connect directly

  - otherwise you can use `kubectl port-forward`

- Otherwise, you can use a `NodePort` or `LoadBalancer` service

---

## Step 2

- Add the `green` Deployment

- Create the `front` service

- Edit the `front` service to replace its selector with a custom one

- Edit `blue` and `green` to add the label(s) of your custom selector

- Check that traffic hits both green and blue

- Think about how to obtain the 90/10 traffic split (one classic approach is sketched below)
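Since a Service spreads traffic across its endpoints (i.e. across ready pods), one classic approach (a sketch; it assumes both Deployments share the label targeted by `front`'s custom selector) is to skew the replica counts:

```bash
# ~90% of the endpoints behind "front" will be blue pods, ~10% green.
kubectl scale deployment blue --replicas=9
kubectl scale deployment green --replicas=1
```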
---

## Step 3

- Generate, write, extract, ... YAML manifests for all components

  (`blue` and `green` Deployments, `front` Service)

- Check that applying the manifests (e.g. in a brand new namespace) works

- Bonus points: add a one-shot pod to check the traffic split!

---

## Discussion

- Would this be a viable option to obtain, say, a 95% / 5% traffic split?

- What about 99% / 1%?

5 slides/find-duplicate-markdown-links.sh Executable file
@@ -0,0 +1,5 @@
#!/bin/sh
for LINK in $(cat */*.md | sed -n 's/^\[\(.*\)\]:.*/\1/p' | sort | uniq -d); do
  grep '^\['"$LINK"'\]:' */*.md
done

BIN slides/images/M6-R01-config-files.png Normal file (After: 74 KiB)
BIN slides/images/M6-cluster-multi-tenants.png Normal file (After: 73 KiB)
BIN slides/images/M6-flux-config-dependencies.png Normal file (After: 186 KiB)
BIN slides/images/M6-flux-config-files.png Normal file (After: 34 KiB)
BIN slides/images/M6-flux-controllers.png Normal file (After: 221 KiB)
BIN slides/images/M6-github-add-token.jpg Normal file (After: 69 KiB)
BIN slides/images/M6-github-teams.png Normal file (After: 162 KiB)
BIN slides/images/M6-grafana-dashboard.png Normal file (After: 570 KiB)
BIN slides/images/M6-incorrect-dataset-in-MOVY-app.png Normal file (After: 278 KiB)
BIN slides/images/M6-incorrect-dataset-in-ROCKY-app.png Normal file (After: 347 KiB)
BIN slides/images/M6-ingress-nginx-scaleway-lb.png Normal file (After: 192 KiB)